- mips: octeon: Add ethernet support (Aaron & Stefan)
- Misc mvneta changes, cleanups, fixes (Marek)
Committed by Tom Rini, 2022-05-04 09:05:03 -04:00 (commit c3d451d5e6)
97 changed files with 33857 additions and 2871 deletions


@@ -3,25 +3,6 @@
* 2022 by Marek Behún <kabel@kernel.org>
*/
/ {
mdio {
#address-cells = <1>;
#size-cells = <0>;
old_binding_phy1: ethernet-phy@1 {
reg = <1>;
};
};
};
&eth0 {
pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
/delete-property/ phy-handle;
phy = <&old_binding_phy1>;
};
/delete-node/ &mdio;
&usb3 {
vbus-supply = <&exp_usb3_vbus>;
};


@@ -101,6 +101,7 @@ config ARCH_JZ47XX
config ARCH_OCTEON
bool "Support Marvell Octeon CN7xxx platforms"
select ARCH_EARLY_INIT_R
select CPU_CAVIUM_OCTEON
select DISPLAY_CPUINFO
select DMA_ADDR_T_64BIT


@@ -267,5 +267,40 @@
interrupts = <0x6c010 4>;
};
};
/* SMI1 */
smi1: mdio@1180000003880 {
compatible = "cavium,octeon-3860-mdio";
reg = <0x11800 0x00003880 0x0 0x40>;
#address-cells = <1>;
#size-cells = <0>;
};
/* BGX 0 */
bgx0: ethernet-mac-nexus@11800e0000000 {
compatible = "cavium,octeon-7890-bgx";
reg = <0x11800 0xe0000000 0x0 0x1000000>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
/* BGX 1 */
bgx1: ethernet-mac-nexus@11800e1000000 {
compatible = "cavium,octeon-7890-bgx";
reg = <0x11800 0xe1000000 0x0 0x1000000>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
/* BGX 2 */
bgx2: ethernet-mac-nexus@11800e2000000 {
compatible = "cavium,octeon-7890-bgx";
reg = <0x11800 0xe2000000 0x0 0x1000000>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
};
};


@@ -201,3 +201,48 @@
cd-gpios = <&gpio 25 1>; /* active low */
};
};
/* SMI_1 -- Available on rev 2 and later boards */
&smi1 {
/**
* The phy names are broken down as follows:
* (m)phyxxyzzs
* where:
* xx = 01 for SGMII, 10 for DXAUI, 20 for RXAUI
* and 40 for XFI/LXAUI
* y = QLM/DLM number
* zz = PHY address (decimal)
* s = sub-phy number in the case of the Cortina
* PHY
* a mphy is a nexus phy that contains one or more
* sub-phys, for example the Cortina CS4223.
*/
/* QLM 2 */
phy01208: ethernet-phy@01208 {
reg = <8>;
compatible = "marvell,88e1240", "ethernet-phy-ieee802.3-c22";
marvell,reg-init = <3 0x10 0 0x8665>,
<3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>,
<3 0x13 0 0x8a08>;
interrupt-parent = <&gpio>;
interrupts = <12 8>; /* Pin 12, active low */
};
};
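To make the naming scheme described in the &smi1 comment above concrete: the node phy01208 decodes as xx = 01 (SGMII), y = 2 (QLM 2) and zz = 08 (PHY address 8, matching reg = <8>). A small stand-alone C sketch of that decoding follows; it is purely illustrative and not part of this commit.

#include <stdio.h>

/* Illustrative only: decode the "(m)phyXXYZZ" label convention documented
 * in the device tree above, e.g. "phy01208" -> mode 01 (SGMII), QLM 2,
 * PHY address 8.
 */
static int decode_phy_label(const char *name)
{
	unsigned int mode, qlm, addr;

	if (*name == 'm')	/* leading 'm' marks a nexus (multi-sub-phy) phy */
		name++;
	if (sscanf(name, "phy%2u%1u%2u", &mode, &qlm, &addr) != 3)
		return -1;
	printf("mode=%02u qlm=%u addr=%u\n", mode, qlm, addr);
	return 0;
}

int main(void)
{
	return decode_phy_label("phy01208");	/* prints: mode=01 qlm=2 addr=8 */
}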
/* BGX 0 */
&bgx0 {
status = "okay";
phy-handle = <&phy01208>; /* put phy-handle in BGX node and MAC node */
/* SerDes 0, may differ from PCS Lane/LMAC */
eth0: ethernet-mac@D {
compatible = "cavium,octeon-7890-bgx-port";
reg = <0>;
local-mac-address = [ 00 00 00 00 00 00 ];
phy-handle = <&phy01208>;
};
};


@@ -118,11 +118,208 @@
&i2c0 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
sfp0eeprom: eeprom@50 {
compatible = "atmel,24c01";
reg = <0x50>;
};
sfp0alerts: eeprom@51 {
compatible = "atmel,24c01";
reg = <0x51>;
};
};
&i2c1 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
vitesse@10 {
compatible = "vitesse,vsc7224";
reg = <0x10>;
#address-cells = <1>;
#size-cells = <0>;
/* Note that reset is active high with this device */
reset = <&gpio 7 0>;
/* LoS pin can be pulled low when there is a loss of signal */
los = <&gpio 6 0>;
vitesse,reg-init =
/* Clear all masks */
/* Page select FSYNC0 (0x30) */
<0x7f 0x0030>,
/* Set FSYNC0 for 10.3125Gbps */
<0x80 0x2841>, /* See Table 3. */
<0x81 0x0008>,
<0x82 0xc000>,
<0x83 0x0010>,
<0x84 0x1d00>,
/* All channels Rx settings set equally */
<0x7f 0x0050>,
/* Shrink EQ_BUFF */
<0x82 0x0014>,
/* Set EQVGA_ADAP = 1 (enable EQVGA circuitry),
* USE_UNIT_GAIN = 1 (EQVGA is in unity gain),
* USE_LPF = 0 (VGA adapt not using LPF),
* USE_EQVGA = 1
*/
<0x89 0x7f13>,
/* Select min DFE Delay (DFE_DELAY) */
<0x90 0x5785>,
/* Set DFE 1-3 limit (DXMAX) = 32dec,
* AP Max limit = 127 decimal
*/
<0x92 0x207f>,
/* Set AP Min limit = 32 decimal */
<0x93 0x2000>,
/* Set DFE Averaging to the slowest (DFE_AVG) */
<0x94 0x0031>,
/* Set Inductor Bypass OD_IND_BYP = 0 & fastest Rise/Fall */
<0x9c 0x0000>,
/* Setting DFE Boost = none. Must set for
* rev C (if DFE in adapt mode)
*/
<0xaa 0x0888>,
/* Setting EQ Min = 8 & Max limit = 72 dec.
* Must set for rev C, otherwise EQ is 0
* (if EQ is in adaptive mode)
*/
<0xa8 0x2408>,
/* Setting EQVGA = 96, when in EQVGA manual mode */
<0xa9 0x0060>,
/* Setting SW_BFOCM, bits 15:14 to 01 */
<0x87 0x4021>,
/* Turn off adaptive input equalization
* and VGA adaptive algorithm control.
*/
<0x89 0x7313>,
/* Turn on adaptive input equalization
* and VGA adaptive algorithm control.
*/
<0x89 0x7f13>;
vitesse-channel@0 {
compatible = "vitesse,vsc7224-channel";
reg = <0>;
direction-tx;
sfp-mac = <&eth0>;
/* TAP settings. The format of this is as
* follows:
* - cable length in meters, 0 = active or
* optical module
* - maintap value
* - pretap value
* - posttap value
*
* For the cable length, the value will apply
* for that cable length and greater until the
* next largest cable length specified. These
* values must be ordered first by channel mask
* then by cable length. These are typically
* set for the transmit channels, not the
* receive channels.
*/
taps = <0 0x0013 0x000f 0x0000>,
<1 0x001f 0x000f 0x0004>,
<3 0x0014 0x000b 0x0004>,
<5 0x0014 0x0009 0x0006>,
<7 0x0014 0x000f 0x0000>,
<10 0x0012 0x000b 0x0013>;
};
vitesse-channel@1 {
compatible = "vitesse,vsc7224-channel";
reg = <1>;
/* Ignore mod_abs and module */
direction-rx;
sfp-mac = <&eth0>;
/* Disable pre-tap */
pretap-disable;
/* Disable post-tap */
posttap-disable;
/* Taps has the following fields:
* - cable length (ignored for rx)
* - main tap value
* - pre tap value
* - post tap value
*
* NOTE: if taps are disabled then they
* are not programmed.
*/
taps = <0 0x0a 0x0b 0x10>;
};
vitesse-channel@2 {
compatible = "vitesse,vsc7224-channel";
reg = <2>;
direction-tx;
sfp-mac = <&eth1>;
/* TAP settings. The format of this is as
* follows:
* - cable length in meters, 0 = active or
* optical module
* - maintap value
* - pretap value
* - posttap value
*
* For the cable length, the value will apply
* for that cable length and greater until the
* next largest cable length specified. These
* values must be ordered first by channel mask
* then by cable length. These are typically
* set for the transmit channels, not the
* receive channels.
*/
taps = <0 0x0013 0x000f 0x0000>,
<1 0x001f 0x000f 0x0004>,
<3 0x0014 0x000b 0x0004>,
<5 0x0014 0x0009 0x0006>,
<7 0x0014 0x000f 0x0000>,
<10 0x0012 0x000b 0x0013>;
};
vitesse-channel@3 {
compatible = "vitesse,vsc7224-channel";
reg = <3>;
/* Ignore mod_abs and module */
direction-rx;
sfp-mac = <&eth1>;
/* Disable pre-tap */
pretap-disable;
/* Disable post-tap */
posttap-disable;
/* Taps has the following fields:
* - cable length (ignored for rx)
* - main tap value
* - pre tap value
* - post tap value
*
* NOTE: if taps are disabled then they
* are not programmed.
*/
taps = <0 0x0a 0x0b 0x10>;
};
};
sfp1eeprom: eeprom@50 {
compatible = "atmel,24c01";
reg = <0x50>;
};
sfp1alerts: eeprom@51 {
compatible = "atmel,24c01";
reg = <0x51>;
};
};
&mmc {
@@ -151,6 +348,26 @@
compatible = "marvell,pci-bootcmd";
status = "okay";
};
sfp0: sfp-slot@0 {
compatible = "ethernet,sfp-slot";
tx_disable = <&gpio 16 0>;
mod_abs = <&gpio 17 0>;
tx_error = <&gpio 19 0>;
rx_los = <&gpio 18 0>;
eeprom = <&sfp0eeprom>;
diag = <&sfp0alerts>;
};
sfp1: sfp-slot@1 {
compatible = "ethernet,sfp-slot";
tx_disable = <&gpio 21 0>;
mod_abs = <&gpio 22 0>;
tx_error = <&gpio 24 0>;
rx_los = <&gpio 23 0>;
eeprom = <&sfp1eeprom>;
diag = <&sfp1alerts>;
};
};
&spi {
@@ -160,3 +377,24 @@
reg = <0>;
};
};
/* BGX 2 */
&bgx2 {
status = "okay";
/* SerDes 0, may differ from PCS Lane/LMAC */
eth0: ethernet-mac@0 {
compatible = "cavium,octeon-7890-bgx-port";
reg = <0>;
local-mac-address = [ 00 00 00 00 00 00 ];
sfp-slot = <&sfp0>;
};
/* SerDes 1, may differ from PCS Lane/LMAC */
eth1: ethernet-mac@1 {
compatible = "cavium,octeon-7890-bgx-port";
reg = <1>;
local-mac-address = [ 00 00 00 00 00 00 ];
sfp-slot = <&sfp1>;
};
};


@@ -12,13 +12,46 @@ obj-y += cvmx-coremask.o
obj-y += cvmx-bootmem.o
obj-y += bootoctlinux.o
# QLM related code
# Misc Octeon C files, mostly for QLM & ethernet support
obj-y += cvmx-agl.o
obj-y += cvmx-fpa.o
obj-y += cvmx-fpa-resource.o
obj-y += cvmx-fau-compat.o
obj-y += cvmx-global-resources.o
obj-y += cvmx-cmd-queue.o
obj-y += cvmx-helper-agl.o
obj-y += cvmx-helper-bgx.o
obj-y += cvmx-helper-board.o
obj-y += cvmx-helper-cfg.o
obj-y += cvmx-helper-fdt.o
obj-y += cvmx-helper-fpa.o
obj-y += cvmx-helper-ilk.o
obj-y += cvmx-helper-ipd.o
obj-y += cvmx-helper-jtag.o
obj-y += cvmx-helper-loop.o
obj-y += cvmx-helper-npi.o
obj-y += cvmx-helper-pki.o
obj-y += cvmx-helper-pko.o
obj-y += cvmx-helper-pko3.o
obj-y += cvmx-helper-rgmii.o
obj-y += cvmx-helper-sfp.o
obj-y += cvmx-helper-sgmii.o
obj-y += cvmx-helper-util.o
obj-y += cvmx-helper-xaui.o
obj-y += cvmx-helper.o
obj-y += cvmx-ilk.o
obj-y += cvmx-ipd.o
obj-y += cvmx-pcie.o
obj-y += cvmx-pki.o
obj-y += cvmx-pki-resources.o
obj-y += cvmx-pko.o
obj-y += cvmx-pko-internal-ports-range.o
obj-y += cvmx-pko3.o
obj-y += cvmx-pko3-compat.o
obj-y += cvmx-pko3-resources.o
obj-y += cvmx-pko3-queue.o
obj-y += cvmx-range.o
obj-y += cvmx-qlm.o
obj-y += cvmx-qlm-tables.o
obj-y += octeon_fdt.o
obj-y += octeon_qlm.o


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2020 Marvell International Ltd.
* Copyright (C) 2020-2022 Marvell International Ltd.
*/
#include <dm.h>
@@ -17,6 +17,8 @@
#include <mach/cvmx-bootmem.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-sata-defs.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
DECLARE_GLOBAL_DATA_PTR;
@@ -393,14 +395,55 @@ static int init_bootcmd_console(void)
return ret;
}
int arch_misc_init(void)
static void configure_lmtdma_window(void)
{
u64 tmp;
u64 addr;
u64 end_addr;
CVMX_MF_CVM_MEM_CTL(tmp);
tmp &= ~0x1ffull;
tmp |= 0x104ull;
/* enable LMTDMA */
tmp |= (1ull << 51);
/* configure scratch line 2 for LMT */
/* TODO: reserve this scratch line, so that others will not use it */
/* TODO: store LMTLINE in global var */
tmp |= (CVMX_PKO_LMTLINE << 45);
/* clear LMTLINE in scratch */
addr = CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
end_addr = addr + CVMX_CACHE_LINE_SIZE;
while (addr < end_addr) {
*CASTPTR(volatile u64, addr + CVMX_SCRATCH_BASE) = (u64)0;
addr += 8;
}
CVMX_MT_CVM_MEM_CTL(tmp);
}
int arch_early_init_r(void)
{
int ret;
/*
* Needs to be called pretty early, so that e.g. networking etc
* can access the bootmem infrastructure
*/
ret = octeon_bootmem_init();
if (ret)
return ret;
if (octeon_has_feature(OCTEON_FEATURE_PKO3))
configure_lmtdma_window();
return 0;
}
int arch_misc_init(void)
{
int ret;
ret = octeon_configure_load_memory();
if (ret)
return ret;


@@ -0,0 +1,216 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for RGMII (MGMT) initialization, configuration,
* and monitoring.
*/
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-agl.h>
#include <mach/cvmx-agl-defs.h>
/*
* @param port to enable
*
* @return Zero on success, negative on failure
*/
int cvmx_agl_enable(int port)
{
cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;
rxx_frm_ctl.u64 = 0;
rxx_frm_ctl.s.pre_align = 1;
/* When set, disables the length check for non-min sized pkts with
* padding in the client data
*/
rxx_frm_ctl.s.pad_len = 1;
/* When set, disables the length check for VLAN pkts */
rxx_frm_ctl.s.vlan_len = 1;
/* When set, PREAMBLE checking is less strict */
rxx_frm_ctl.s.pre_free = 1;
/* Control Pause Frames can match station SMAC */
rxx_frm_ctl.s.ctl_smac = 0;
/* Control Pause Frames can match globally assigned Multicast address */
rxx_frm_ctl.s.ctl_mcst = 1;
rxx_frm_ctl.s.ctl_bck = 1; /* Forward pause information to TX block */
rxx_frm_ctl.s.ctl_drp = 1; /* Drop Control Pause Frames */
rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
/* This port is configured to send PREAMBLE+SFD to begin every frame.
* GMX checks that the PREAMBLE is sent correctly
*/
rxx_frm_ctl.s.pre_chk = 1;
csr_wr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
return 0;
}
cvmx_helper_link_info_t cvmx_agl_link_get(int port)
{
cvmx_helper_link_info_t result;
int interface, port_index;
/* Fake IPD port is used on some older models. */
if (port < 0)
return __cvmx_helper_board_link_get(port);
/* Simulator does not have PHY, use some defaults. */
interface = cvmx_helper_get_interface_num(port);
port_index = cvmx_helper_get_interface_index_num(port);
if (cvmx_helper_get_port_force_link_up(interface, port_index)) {
result.u64 = 0;
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
return result;
}
return __cvmx_helper_board_link_get(port);
}
/*
* Set MII/RGMII link based on mode.
*
* @param port interface port to set the link.
* @param link_info Link status
*
* @return 0 on success and -1 on failure
*/
int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info)
{
cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
/* Disable GMX before we make any changes. */
agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
agl_gmx_prtx.s.en = 0;
agl_gmx_prtx.s.tx_en = 0;
agl_gmx_prtx.s.rx_en = 0;
csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
u64 one_second = 0x1000000; /* todo: this needs checking */
/* Wait for GMX to be idle */
if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==,
1, one_second) ||
CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==,
1, one_second)) {
debug("AGL%d: Timeout waiting for GMX to be idle\n",
port);
return -1;
}
}
agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
/* Set duplex mode */
if (!link_info.s.link_up)
agl_gmx_prtx.s.duplex = 1; /* Force full duplex on down links */
else
agl_gmx_prtx.s.duplex = link_info.s.full_duplex;
switch (link_info.s.speed) {
case 10:
agl_gmx_prtx.s.speed = 0;
agl_gmx_prtx.s.slottime = 0;
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
agl_gmx_prtx.s.speed_msb = 1;
agl_gmx_prtx.s.burst = 1;
}
break;
case 100:
agl_gmx_prtx.s.speed = 0;
agl_gmx_prtx.s.slottime = 0;
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
agl_gmx_prtx.s.speed_msb = 0;
agl_gmx_prtx.s.burst = 1;
}
break;
case 1000:
/* 1000 MBits is only supported on 6XXX chips */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
agl_gmx_prtx.s.speed_msb = 0;
agl_gmx_prtx.s.speed = 1;
agl_gmx_prtx.s.slottime =
1; /* Only matters for half-duplex */
agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;
}
break;
/* No link */
case 0:
default:
break;
}
/* Write the new GMX setting with the port still disabled. */
csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
/* Read GMX CFG again to make sure the config is completed. */
agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
cvmx_agl_gmx_txx_clk_t agl_clk;
cvmx_agl_prtx_ctl_t prt_ctl;
prt_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_clk.u64 = csr_rd(CVMX_AGL_GMX_TXX_CLK(port));
/* MII (both speeds) and RGMII 1000 setting */
agl_clk.s.clk_cnt = 1;
/* Check other speeds for RGMII mode */
if (prt_ctl.s.mode == 0 || OCTEON_IS_OCTEON3()) {
if (link_info.s.speed == 10)
agl_clk.s.clk_cnt = 50;
else if (link_info.s.speed == 100)
agl_clk.s.clk_cnt = 5;
}
csr_wr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);
}
/* Enable transmit and receive ports */
agl_gmx_prtx.s.tx_en = 1;
agl_gmx_prtx.s.rx_en = 1;
csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
/* Enable the link. */
agl_gmx_prtx.s.en = 1;
csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
if (OCTEON_IS_OCTEON3()) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
/* Enable the interface, set clkrst */
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_prtx_ctl.s.clkrst = 1;
csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_prtx_ctl.s.enable = 1;
csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
/* Read the value back to force the previous write */
csr_rd(CVMX_AGL_PRTX_CTL(port));
}
return 0;
}
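The two entry points above are meant to be used together; the sketch below mirrors the call sequence used later in this series by __cvmx_helper_agl_enable(). The function name and the assumption that the caller already knows both the AGL port and its IPD/PKO port number are mine, not part of the commit, and the snippet assumes the same <mach/...> headers as the file above.

/* Sketch only: bring up one AGL (RGMII) MAC and program it to whatever
 * link state the board/PHY currently reports.
 */
static int example_agl_port_up(int port, int ipd_port)
{
	cvmx_agl_enable(port);

	/* Query the PHY via the IPD port, then apply that state to the MAC */
	return cvmx_agl_link_set(port, cvmx_agl_link_get(ipd_port));
}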


@@ -1189,7 +1189,7 @@ s64 cvmx_bootmem_phy_mem_list_init(u64 mem_size,
if (mem_size > OCTEON_DDR1_SIZE) {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
mem_size - OCTEON_DDR1_SIZE, 0);
mem_size - OCTEON_DDR2_BASE, 0);
} else {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
@@ -1349,7 +1349,6 @@ s64 cvmx_bootmem_phy_mem_list_init_multi(u8 node_mask,
addr += sizeof(struct cvmx_bootmem_named_block_desc);
}
// test-only: DEBUG ifdef???
cvmx_bootmem_phy_list_print();
return 1;


@@ -0,0 +1,355 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Support functions for managing command queues used for
* various hardware blocks.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-fpa.h>
#include <mach/cvmx-cmd-queue.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-dpi-defs.h>
#include <mach/cvmx-npei-defs.h>
#include <mach/cvmx-pexp-defs.h>
/**
* This application uses this pointer to access the global queue
* state. It points to a bootmem named block.
*/
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptrs[CVMX_MAX_NODES];
/**
* @INTERNAL
* Initialize the Global queue state pointer.
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(unsigned int node)
{
const char *alloc_name = "cvmx_cmd_queues\0\0";
char s[4] = "_0";
const struct cvmx_bootmem_named_block_desc *block_desc = NULL;
unsigned int size;
u64 paddr_min = 0, paddr_max = 0;
void *ptr;
if (cvmx_likely(__cvmx_cmd_queue_state_ptrs[node]))
return CVMX_CMD_QUEUE_SUCCESS;
/* Add node# to block name */
if (node > 0) {
s[1] += node;
strcat((char *)alloc_name, s);
}
/* Find the named block in case it has been created already */
block_desc = cvmx_bootmem_find_named_block(alloc_name);
if (block_desc) {
__cvmx_cmd_queue_state_ptrs[node] =
(__cvmx_cmd_queue_all_state_t *)cvmx_phys_to_ptr(
block_desc->base_addr);
return CVMX_CMD_QUEUE_SUCCESS;
}
size = sizeof(*__cvmx_cmd_queue_state_ptrs[node]);
/* Rest of the code is to allocate a new named block */
/* Atomically allocate named block once, and zero it by default */
ptr = cvmx_bootmem_alloc_named_range_once(size, paddr_min, paddr_max,
128, alloc_name, NULL);
if (ptr) {
__cvmx_cmd_queue_state_ptrs[node] =
(__cvmx_cmd_queue_all_state_t *)ptr;
} else {
debug("ERROR: %s: Unable to get named block %s.\n", __func__,
alloc_name);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Initialize a command queue for use. The initial FPA buffer is
* allocated and the hardware unit is configured to point to the
* new command queue.
*
* @param queue_id Hardware command queue to initialize.
* @param max_depth Maximum outstanding commands that can be queued.
* @param fpa_pool FPA pool the command queues should come from.
* @param pool_size Size of each buffer in the FPA pool (bytes)
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
int max_depth, int fpa_pool,
int pool_size)
{
__cvmx_cmd_queue_state_t *qstate;
cvmx_cmd_queue_result_t result;
unsigned int node;
unsigned int index;
int fpa_pool_min, fpa_pool_max;
union cvmx_fpa_ctl_status status;
void *buffer;
node = __cvmx_cmd_queue_get_node(queue_id);
index = __cvmx_cmd_queue_get_index(queue_id);
if (index >= NUM_ELEMENTS(__cvmx_cmd_queue_state_ptrs[node]->state)) {
printf("ERROR: %s: queue %#x out of range\n", __func__,
queue_id);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
result = __cvmx_cmd_queue_init_state_ptr(node);
if (result != CVMX_CMD_QUEUE_SUCCESS)
return result;
qstate = __cvmx_cmd_queue_get_state(queue_id);
if (!qstate)
return CVMX_CMD_QUEUE_INVALID_PARAM;
/*
* We artificially limit max_depth to 1<<20 words. It is an
* arbitrary limit.
*/
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
if (max_depth < 0 || max_depth > 1 << 20)
return CVMX_CMD_QUEUE_INVALID_PARAM;
} else if (max_depth != 0) {
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
/* CVMX_FPA_NUM_POOLS maps to cvmx_fpa3_num_auras for FPA3 */
fpa_pool_min = node << 10;
fpa_pool_max = fpa_pool_min + CVMX_FPA_NUM_POOLS;
if (fpa_pool < fpa_pool_min || fpa_pool >= fpa_pool_max)
return CVMX_CMD_QUEUE_INVALID_PARAM;
if (pool_size < 128 || pool_size > (1 << 17))
return CVMX_CMD_QUEUE_INVALID_PARAM;
if (pool_size & 3)
debug("WARNING: %s: pool_size %d not multiple of 8\n", __func__,
pool_size);
/* See if someone else has already initialized the queue */
if (qstate->base_paddr) {
int depth;
static const char emsg[] = /* Common error message part */
"Queue already initialized with different ";
depth = (max_depth + qstate->pool_size_m1 - 1) /
qstate->pool_size_m1;
if (depth != qstate->max_depth) {
depth = qstate->max_depth * qstate->pool_size_m1;
debug("ERROR: %s: %s max_depth (%d).\n", __func__, emsg,
depth);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (fpa_pool != qstate->fpa_pool) {
debug("ERROR: %s: %s FPA pool (%d).\n", __func__, emsg,
(int)qstate->fpa_pool);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
debug("ERROR: %s: %s FPA pool size (%u).\n", __func__,
emsg, (qstate->pool_size_m1 + 1) << 3);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
return CVMX_CMD_QUEUE_ALREADY_SETUP;
}
if (!(octeon_has_feature(OCTEON_FEATURE_FPA3))) {
status.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
if (!status.s.enb) {
debug("ERROR: %s: FPA is not enabled.\n",
__func__);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
}
buffer = cvmx_fpa_alloc(fpa_pool);
if (!buffer) {
debug("ERROR: %s: allocating first buffer.\n", __func__);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
index = (pool_size >> 3) - 1;
qstate->pool_size_m1 = index;
qstate->max_depth = (max_depth + index - 1) / index;
qstate->index = 0;
qstate->fpa_pool = fpa_pool;
qstate->base_paddr = cvmx_ptr_to_phys(buffer);
/* Initialize lock */
__cvmx_cmd_queue_lock_init(queue_id);
return CVMX_CMD_QUEUE_SUCCESS;
}
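As a rough usage sketch for the initializer above (and for cvmx_cmd_queue_buffer() defined just below): the FPA pool argument and the 1024-byte pool buffer size here are placeholders, not values taken from this commit.

/* Sketch only: set up one command queue and return its first command
 * buffer so a hardware block (e.g. PKO) can be pointed at it.
 */
static void *example_cmd_queue_setup(cvmx_cmd_queue_id_t queue_id, int fpa_pool)
{
	cvmx_cmd_queue_result_t rv;

	/* max_depth = 0: no software-enforced depth limit */
	rv = cvmx_cmd_queue_initialize(queue_id, 0, fpa_pool, 1024);
	if (rv != CVMX_CMD_QUEUE_SUCCESS && rv != CVMX_CMD_QUEUE_ALREADY_SETUP)
		return NULL;

	return cvmx_cmd_queue_buffer(queue_id);
}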
/**
* Return the command buffer to be written to. The purpose of this
* function is to allow CVMX routine access to the low level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @param queue_id Command queue to query
*
* @return Command buffer or NULL on failure
*/
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
if (qptr && qptr->base_paddr)
return cvmx_phys_to_ptr((u64)qptr->base_paddr);
else
return NULL;
}
static u64 *__cvmx_cmd_queue_add_blk(__cvmx_cmd_queue_state_t *qptr)
{
u64 *cmd_ptr;
u64 *new_buffer;
u64 new_paddr;
/* Get base vaddr of current (full) block */
cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
/* Allocate a new block from the per-queue pool */
new_buffer = (u64 *)cvmx_fpa_alloc(qptr->fpa_pool);
/* Check for allocation failure */
if (cvmx_unlikely(!new_buffer))
return NULL;
/* Invalidate the link pointer at the end of the new block,
* in case this block gets filled to the brim
*/
new_buffer[qptr->pool_size_m1] = ~0ull;
/* Get physical address of the new buffer */
new_paddr = cvmx_ptr_to_phys(new_buffer);
/* Store the physical link address at the end of current full block */
cmd_ptr[qptr->pool_size_m1] = new_paddr;
/* Store the physical address in the queue state structure */
qptr->base_paddr = new_paddr;
qptr->index = 0;
/* Return the virtual base of the new block */
return new_buffer;
}
/**
* @INTERNAL
* Add command words into a queue, handling all the corner cases
* where only some of the words might fit into the current block,
* and a new block may need to be allocated.
* Locking and argument checks are done in the front-end in-line
* functions that call this one for the rare corner cases.
*/
cvmx_cmd_queue_result_t
__cvmx_cmd_queue_write_raw(cvmx_cmd_queue_id_t queue_id,
__cvmx_cmd_queue_state_t *qptr, int cmd_count,
const u64 *cmds)
{
u64 *cmd_ptr;
unsigned int index;
cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
index = qptr->index;
/* Enforce queue depth limit, if enabled, once per block */
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth)) {
unsigned int depth = cvmx_cmd_queue_length(queue_id);
depth /= qptr->pool_size_m1;
if (cvmx_unlikely(depth > qptr->max_depth))
return CVMX_CMD_QUEUE_FULL;
}
/*
* If the block allocation fails, even the words that we wrote
* to the current block will not count because the 'index' will
* not be committed.
* The loop is run 'count + 1' times to take care of the tail
* case, where the buffer is full to the brim, so the link
* pointer must be filled with a valid address.
*/
while (cmd_count >= 0) {
if (index >= qptr->pool_size_m1) {
/* Block is full, get another one and proceed */
cmd_ptr = __cvmx_cmd_queue_add_blk(qptr);
/* Bail out on allocation error w/o committing anything */
if (cvmx_unlikely(!cmd_ptr))
return CVMX_CMD_QUEUE_NO_MEMORY;
/* Reset index for start of new block */
index = 0;
}
/* Exit Loop on 'count + 1' iterations */
if (cmd_count <= 0)
break;
/* Store commands into queue block while there is space */
cmd_ptr[index++] = *cmds++;
cmd_count--;
} /* while cmd_count */
/* Commit added words if all is well */
qptr->index = index;
return CVMX_CMD_QUEUE_SUCCESS;
}


@@ -0,0 +1,53 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-hwfau.h>
u8 *cvmx_fau_regs_ptr;
void cvmx_fau_bootmem_init(void *bootmem)
{
memset(bootmem, 0, CVMX_FAU_MAX_REGISTERS_8);
}
/**
* Initializes FAU region for devices without FAU unit.
* @return 0 on success -1 on failure
*/
int cvmx_fau_init(void)
{
cvmx_fau_regs_ptr = (u8 *)cvmx_bootmem_alloc_named_range_once(
CVMX_FAU_MAX_REGISTERS_8, 0, 1ull << 31, 128,
"cvmx_fau_registers", cvmx_fau_bootmem_init);
if (cvmx_fau_regs_ptr == 0ull) {
debug("ERROR: Failed to alloc named block for software FAU.\n");
return -1;
}
return 0;
}


@@ -0,0 +1,219 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
static struct global_resource_tag get_fpa1_resource_tag(void)
{
return CVMX_GR_TAG_FPA;
}
static struct global_resource_tag get_fpa3_aura_resource_tag(int node)
{
return cvmx_get_gr_tag('c', 'v', 'm', '_', 'a', 'u', 'r', 'a', '_',
node + '0', '.', '.', '.', '.', '.', '.');
}
static struct global_resource_tag get_fpa3_pool_resource_tag(int node)
{
return cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'o', 'o', 'l', '_',
node + '0', '.', '.', '.', '.', '.', '.');
}
int cvmx_fpa_get_max_pools(void)
{
if (octeon_has_feature(OCTEON_FEATURE_FPA3))
return cvmx_fpa3_num_auras();
else if (OCTEON_IS_MODEL(OCTEON_CN68XX))
/* 68xx pool 8 is not available via API */
return CVMX_FPA1_NUM_POOLS;
else
return CVMX_FPA1_NUM_POOLS;
}
cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num)
{
u64 owner = cvmx_get_app_id();
int rv = 0;
struct global_resource_tag tag;
cvmx_fpa3_gaura_t aura;
if (node == -1)
node = cvmx_get_node_num();
tag = get_fpa3_aura_resource_tag(node);
if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_auras()) !=
0) {
printf("ERROR: %s: global resource create node=%u\n", __func__,
node);
return CVMX_FPA3_INVALID_GAURA;
}
if (desired_aura_num >= 0)
rv = cvmx_reserve_global_resource_range(tag, owner,
desired_aura_num, 1);
else
rv = cvmx_resource_alloc_reverse(tag, owner);
if (rv < 0) {
printf("ERROR: %s: node=%u desired aura=%d\n", __func__, node,
desired_aura_num);
return CVMX_FPA3_INVALID_GAURA;
}
aura = __cvmx_fpa3_gaura(node, rv);
return aura;
}
int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura)
{
struct global_resource_tag tag = get_fpa3_aura_resource_tag(aura.node);
int laura = aura.laura;
if (!__cvmx_fpa3_aura_valid(aura))
return -1;
return cvmx_free_global_resource_range_multiple(tag, &laura, 1);
}
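A minimal round-trip sketch for the aura reserve/release pair above; the node number 0 and the function name are illustrative only and assume the same headers as this file.

/* Sketch only: reserve any free FPA3 aura on node 0, then hand it back. */
static int example_aura_roundtrip(void)
{
	cvmx_fpa3_gaura_t aura;

	aura = cvmx_fpa3_reserve_aura(0, -1);	/* -1: take any available aura */
	if (!__cvmx_fpa3_aura_valid(aura))
		return -1;

	/* ... set up and use the aura here ... */

	return cvmx_fpa3_release_aura(aura);
}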
/**
 * Reserve an FPA3 pool, either a specific pool number or any available one.
 */
cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num)
{
u64 owner = cvmx_get_app_id();
int rv = 0;
struct global_resource_tag tag;
cvmx_fpa3_pool_t pool;
if (node == -1)
node = cvmx_get_node_num();
tag = get_fpa3_pool_resource_tag(node);
if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_pools()) !=
0) {
printf("ERROR: %s: global resource create node=%u\n", __func__,
node);
return CVMX_FPA3_INVALID_POOL;
}
if (desired_pool_num >= 0)
rv = cvmx_reserve_global_resource_range(tag, owner,
desired_pool_num, 1);
else
rv = cvmx_resource_alloc_reverse(tag, owner);
if (rv < 0) {
/* Desired pool is already in use */
return CVMX_FPA3_INVALID_POOL;
}
pool = __cvmx_fpa3_pool(node, rv);
return pool;
}
int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool)
{
struct global_resource_tag tag = get_fpa3_pool_resource_tag(pool.node);
int lpool = pool.lpool;
if (!__cvmx_fpa3_pool_valid(pool))
return -1;
if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_pools()) !=
0) {
printf("ERROR: %s: global resource create node=%u\n", __func__,
pool.node);
return -1;
}
return cvmx_free_global_resource_range_multiple(tag, &lpool, 1);
}
cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(int desired_pool_num)
{
u64 owner = cvmx_get_app_id();
struct global_resource_tag tag;
int rv;
tag = get_fpa1_resource_tag();
if (cvmx_create_global_resource_range(tag, CVMX_FPA1_NUM_POOLS) != 0) {
printf("ERROR: %s: global resource not created\n", __func__);
return -1;
}
if (desired_pool_num >= 0) {
rv = cvmx_reserve_global_resource_range(tag, owner,
desired_pool_num, 1);
} else {
rv = cvmx_resource_alloc_reverse(tag, owner);
}
if (rv < 0) {
printf("ERROR: %s: FPA_POOL %d unavailable\n", __func__,
desired_pool_num);
return CVMX_RESOURCE_ALREADY_RESERVED;
}
return (cvmx_fpa1_pool_t)rv;
}
int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool)
{
struct global_resource_tag tag;
tag = get_fpa1_resource_tag();
return cvmx_free_global_resource_range_multiple(tag, &pool, 1);
}

File diff suppressed because it is too large.


@@ -0,0 +1,517 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
*/
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-bootmem.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#define CVMX_MAX_GLOBAL_RESOURCES 128
#define CVMX_RESOURCES_ENTRIES_SIZE \
(sizeof(struct cvmx_global_resource_entry) * CVMX_MAX_GLOBAL_RESOURCES)
/**
* This macro returns a member of the data
* structure. The argument "field" is the member name of the
* structure to read. The return type is a u64.
*/
#define CVMX_GLOBAL_RESOURCES_GET_FIELD(field) \
__cvmx_struct_get_unsigned_field( \
__cvmx_global_resources_addr, \
offsetof(struct cvmx_global_resources, field), \
SIZEOF_FIELD(struct cvmx_global_resources, field))
/**
* This macro writes a member of the struct cvmx_global_resources
* structure. The argument "field" is the member name of the
* struct cvmx_global_resources to write.
*/
#define CVMX_GLOBAL_RESOURCES_SET_FIELD(field, value) \
__cvmx_struct_set_unsigned_field( \
__cvmx_global_resources_addr, \
offsetof(struct cvmx_global_resources, field), \
SIZEOF_FIELD(struct cvmx_global_resources, field), value)
/**
* This macro returns a member of the struct cvmx_global_resource_entry.
* The argument "field" is the member name of this structure.
* the return type is a u64. The "addr" parameter is the physical
* address of the structure.
*/
#define CVMX_RESOURCE_ENTRY_GET_FIELD(addr, field) \
__cvmx_struct_get_unsigned_field( \
addr, offsetof(struct cvmx_global_resource_entry, field), \
SIZEOF_FIELD(struct cvmx_global_resource_entry, field))
/**
* This macro writes a member of the struct cvmx_global_resource_entry
* structure. The argument "field" is the member name of the
* struct cvmx_global_resource_entry to write. The "addr" parameter
* is the physical address of the structure.
*/
#define CVMX_RESOURCE_ENTRY_SET_FIELD(addr, field, value) \
__cvmx_struct_set_unsigned_field( \
addr, offsetof(struct cvmx_global_resource_entry, field), \
SIZEOF_FIELD(struct cvmx_global_resource_entry, field), value)
#define CVMX_GET_RESOURCE_ENTRY(count) \
(__cvmx_global_resources_addr + \
offsetof(struct cvmx_global_resources, resource_entry) + \
(count * sizeof(struct cvmx_global_resource_entry)))
#define CVMX_RESOURCE_TAG_SET_FIELD(addr, field, value) \
__cvmx_struct_set_unsigned_field( \
addr, offsetof(struct global_resource_tag, field), \
SIZEOF_FIELD(struct global_resource_tag, field), value)
#define CVMX_RESOURCE_TAG_GET_FIELD(addr, field) \
__cvmx_struct_get_unsigned_field( \
addr, offsetof(struct global_resource_tag, field), \
SIZEOF_FIELD(struct global_resource_tag, field))
#define MAX_RESOURCE_TAG_LEN 16
#define CVMX_GLOBAL_RESOURCE_NO_LOCKING (1)
struct cvmx_global_resource_entry {
struct global_resource_tag tag;
u64 phys_addr;
u64 size;
};
struct cvmx_global_resources {
u32 pad;
u32 rlock;
u64 entry_cnt;
struct cvmx_global_resource_entry resource_entry[];
};
/* Not the right place, putting it here for now */
u64 cvmx_app_id;
/*
* Global named memory can be accessed anywhere even in 32-bit mode
*/
static u64 __cvmx_global_resources_addr;
/**
* This macro returns the size of a member of a structure.
*/
#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
/**
* This function is the implementation of the get macros defined
* for individual structure members. The arguments are generated
* by the macros in order to read only the needed memory.
*
* @param base 64bit physical address of the complete structure
* @param offset from the beginning of the structure to the member being
* accessed.
* @param size Size of the structure member.
*
* @return Value of the structure member promoted into a u64.
*/
static inline u64 __cvmx_struct_get_unsigned_field(u64 base, int offset,
int size)
{
base = (1ull << 63) | (base + offset);
switch (size) {
case 4:
return cvmx_read64_uint32(base);
case 8:
return cvmx_read64_uint64(base);
default:
return 0;
}
}
/**
* This function is the implementation of the set macros defined
* for individual structure members. The arguments are generated
* by the macros in order to write only the needed memory.
*
* @param base 64bit physical address of the complete structure
* @param offset from the beginning of the structure to the member being
* accessed.
* @param size Size of the structure member.
* @param value Value to write into the structure
*/
static inline void __cvmx_struct_set_unsigned_field(u64 base, int offset,
int size, u64 value)
{
base = (1ull << 63) | (base + offset);
switch (size) {
case 4:
cvmx_write64_uint32(base, value);
break;
case 8:
cvmx_write64_uint64(base, value);
break;
default:
break;
}
}
/* Get the global resource lock. */
static inline void __cvmx_global_resource_lock(void)
{
u64 lock_addr =
(1ull << 63) | (__cvmx_global_resources_addr +
offsetof(struct cvmx_global_resources, rlock));
unsigned int tmp;
__asm__ __volatile__(".set noreorder\n"
"1: ll %[tmp], 0(%[addr])\n"
" bnez %[tmp], 1b\n"
" li %[tmp], 1\n"
" sc %[tmp], 0(%[addr])\n"
" beqz %[tmp], 1b\n"
" nop\n"
".set reorder\n"
: [tmp] "=&r"(tmp)
: [addr] "r"(lock_addr)
: "memory");
}
/* Release the global resource lock. */
static inline void __cvmx_global_resource_unlock(void)
{
u64 lock_addr =
(1ull << 63) | (__cvmx_global_resources_addr +
offsetof(struct cvmx_global_resources, rlock));
CVMX_SYNCW;
__asm__ __volatile__("sw $0, 0(%[addr])\n"
:
: [addr] "r"(lock_addr)
: "memory");
CVMX_SYNCW;
}
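For readers not fluent in MIPS ll/sc assembly, the lock/unlock pair above behaves roughly like the compare-and-swap spinlock sketched below. This is an illustration only, with names of my choosing; the real code keeps the 32-bit lock word inside the bootmem-backed cvmx_global_resources structure and orders stores with CVMX_SYNCW.

/* Sketch only: acquire/release semantics equivalent to the ll/sc loop above,
 * expressed with GCC atomic builtins on a plain 32-bit lock word.
 */
static inline void example_resource_lock(volatile u32 *rlock)
{
	u32 expected;

	do {
		expected = 0;	/* only take the lock if it is currently free */
	} while (!__atomic_compare_exchange_n(rlock, &expected, 1, false,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}

static inline void example_resource_unlock(volatile u32 *rlock)
{
	__atomic_store_n(rlock, 0, __ATOMIC_RELEASE);
}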
static u64 __cvmx_alloc_bootmem_for_global_resources(int sz)
{
void *tmp;
tmp = cvmx_bootmem_alloc_range(sz, CVMX_CACHE_LINE_SIZE, 0, 0);
return cvmx_ptr_to_phys(tmp);
}
static inline void __cvmx_get_tagname(struct global_resource_tag *rtag,
char *tagname)
{
int i, j, k;
j = 0;
k = 8;
for (i = 7; i >= 0; i--, j++, k++) {
tagname[j] = (rtag->lo >> (i * 8)) & 0xff;
tagname[k] = (rtag->hi >> (i * 8)) & 0xff;
}
}
static u64 __cvmx_global_resources_init(void)
{
struct cvmx_bootmem_named_block_desc *block_desc;
int sz = sizeof(struct cvmx_global_resources) +
CVMX_RESOURCES_ENTRIES_SIZE;
s64 tmp_phys;
int count = 0;
u64 base = 0;
cvmx_bootmem_lock();
block_desc = (struct cvmx_bootmem_named_block_desc *)
__cvmx_bootmem_find_named_block_flags(
CVMX_GLOBAL_RESOURCES_DATA_NAME,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (!block_desc) {
debug("%s: allocating global resources\n", __func__);
tmp_phys = cvmx_bootmem_phy_named_block_alloc(
sz, 0, 0, CVMX_CACHE_LINE_SIZE,
CVMX_GLOBAL_RESOURCES_DATA_NAME,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (tmp_phys < 0) {
cvmx_printf(
"ERROR: %s: failed to allocate global resource name block. sz=%d\n",
__func__, sz);
goto end;
}
__cvmx_global_resources_addr = (u64)tmp_phys;
debug("%s: memset global resources %llu\n", __func__,
CAST_ULL(__cvmx_global_resources_addr));
base = (1ull << 63) | __cvmx_global_resources_addr;
for (count = 0; count < (sz / 8); count++) {
cvmx_write64_uint64(base, 0);
base += 8;
}
} else {
debug("%s:found global resource\n", __func__);
__cvmx_global_resources_addr = block_desc->base_addr;
}
end:
cvmx_bootmem_unlock();
debug("__cvmx_global_resources_addr=%llu sz=%d\n",
CAST_ULL(__cvmx_global_resources_addr), sz);
return __cvmx_global_resources_addr;
}
u64 cvmx_get_global_resource(struct global_resource_tag tag, int no_lock)
{
u64 entry_cnt = 0;
u64 resource_entry_addr = 0;
int count = 0;
u64 rphys_addr = 0;
u64 tag_lo = 0, tag_hi = 0;
if (__cvmx_global_resources_addr == 0)
__cvmx_global_resources_init();
if (!no_lock)
__cvmx_global_resource_lock();
entry_cnt = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
while (entry_cnt > 0) {
resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(count);
tag_lo = CVMX_RESOURCE_TAG_GET_FIELD(resource_entry_addr, lo);
tag_hi = CVMX_RESOURCE_TAG_GET_FIELD(resource_entry_addr, hi);
if (tag_lo == tag.lo && tag_hi == tag.hi) {
debug("%s: Found global resource entry\n", __func__);
break;
}
entry_cnt--;
count++;
}
if (entry_cnt == 0) {
debug("%s: no matching global resource entry found\n",
__func__);
if (!no_lock)
__cvmx_global_resource_unlock();
return 0;
}
rphys_addr =
CVMX_RESOURCE_ENTRY_GET_FIELD(resource_entry_addr, phys_addr);
if (!no_lock)
__cvmx_global_resource_unlock();
return rphys_addr;
}
u64 cvmx_create_global_resource(struct global_resource_tag tag, u64 size,
int no_lock, int *_new_)
{
u64 entry_count = 0;
u64 resource_entry_addr = 0;
u64 phys_addr;
if (__cvmx_global_resources_addr == 0)
__cvmx_global_resources_init();
if (!no_lock)
__cvmx_global_resource_lock();
phys_addr =
cvmx_get_global_resource(tag, CVMX_GLOBAL_RESOURCE_NO_LOCKING);
if (phys_addr != 0) {
/* we already have the resource, return it */
*_new_ = 0;
goto end;
}
*_new_ = 1;
entry_count = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
if (entry_count >= CVMX_MAX_GLOBAL_RESOURCES) {
char tagname[MAX_RESOURCE_TAG_LEN + 1];
__cvmx_get_tagname(&tag, tagname);
cvmx_printf(
"ERROR: %s: reached global resources limit for %s\n",
__func__, tagname);
phys_addr = 0;
goto end;
}
/* Allocate bootmem for the resource */
phys_addr = __cvmx_alloc_bootmem_for_global_resources(size);
if (!phys_addr) {
char tagname[MAX_RESOURCE_TAG_LEN + 1];
__cvmx_get_tagname(&tag, tagname);
debug("ERROR: %s: out of memory %s, size=%d\n", __func__,
tagname, (int)size);
goto end;
}
resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(entry_count);
CVMX_RESOURCE_ENTRY_SET_FIELD(resource_entry_addr, phys_addr,
phys_addr);
CVMX_RESOURCE_ENTRY_SET_FIELD(resource_entry_addr, size, size);
CVMX_RESOURCE_TAG_SET_FIELD(resource_entry_addr, lo, tag.lo);
CVMX_RESOURCE_TAG_SET_FIELD(resource_entry_addr, hi, tag.hi);
/* update entry_cnt */
entry_count += 1;
CVMX_GLOBAL_RESOURCES_SET_FIELD(entry_cnt, entry_count);
end:
if (!no_lock)
__cvmx_global_resource_unlock();
return phys_addr;
}
int cvmx_create_global_resource_range(struct global_resource_tag tag,
int nelements)
{
int sz = cvmx_range_memory_size(nelements);
int _new_;
u64 addr;
int rv = 0;
if (__cvmx_global_resources_addr == 0)
__cvmx_global_resources_init();
__cvmx_global_resource_lock();
addr = cvmx_create_global_resource(tag, sz, 1, &_new_);
if (!addr) {
__cvmx_global_resource_unlock();
return -1;
}
if (_new_)
rv = cvmx_range_init(addr, nelements);
__cvmx_global_resource_unlock();
return rv;
}
int cvmx_allocate_global_resource_range(struct global_resource_tag tag,
u64 owner, int nelements, int alignment)
{
u64 addr = cvmx_get_global_resource(tag, 1);
int base;
if (addr == 0) {
char tagname[256];
__cvmx_get_tagname(&tag, tagname);
cvmx_printf("ERROR: %s: cannot find resource %s\n", __func__,
tagname);
return -1;
}
__cvmx_global_resource_lock();
base = cvmx_range_alloc(addr, owner, nelements, alignment);
__cvmx_global_resource_unlock();
return base;
}
int cvmx_resource_alloc_reverse(struct global_resource_tag tag, u64 owner)
{
u64 addr = cvmx_get_global_resource(tag, 1);
int rv;
if (addr == 0) {
char tagname[256];
__cvmx_get_tagname(&tag, tagname);
debug("ERROR: cannot find resource %s\n", tagname);
return -1;
}
__cvmx_global_resource_lock();
rv = cvmx_range_alloc_ordered(addr, owner, 1, 1, 1);
__cvmx_global_resource_unlock();
return rv;
}
int cvmx_reserve_global_resource_range(struct global_resource_tag tag,
u64 owner, int base, int nelements)
{
u64 addr = cvmx_get_global_resource(tag, 1);
int start;
__cvmx_global_resource_lock();
start = cvmx_range_reserve(addr, owner, base, nelements);
__cvmx_global_resource_unlock();
return start;
}
int cvmx_free_global_resource_range_with_base(struct global_resource_tag tag,
int base, int nelements)
{
u64 addr = cvmx_get_global_resource(tag, 1);
int rv;
/* Resource was not created, nothing to release */
if (addr == 0)
return 0;
__cvmx_global_resource_lock();
rv = cvmx_range_free_with_base(addr, base, nelements);
__cvmx_global_resource_unlock();
return rv;
}
int cvmx_free_global_resource_range_multiple(struct global_resource_tag tag,
int bases[], int nelements)
{
u64 addr = cvmx_get_global_resource(tag, 1);
int rv;
/* Resource was not created, nothing to release */
if (addr == 0)
return 0;
__cvmx_global_resource_lock();
rv = cvmx_range_free_mutiple(addr, bases, nelements);
__cvmx_global_resource_unlock();
return rv;
}
void cvmx_app_id_init(void *bootmem)
{
u64 *p = (u64 *)bootmem;
*p = 0;
}
u64 cvmx_allocate_app_id(void)
{
u64 *vptr;
vptr = (u64 *)cvmx_bootmem_alloc_named_range_once(sizeof(cvmx_app_id),
0, 1 << 31, 128,
"cvmx_app_id",
cvmx_app_id_init);
cvmx_app_id = __atomic_add_fetch(vptr, 1, __ATOMIC_SEQ_CST);
debug("CVMX_APP_ID = %lx\n", (unsigned long)cvmx_app_id);
return cvmx_app_id;
}
u64 cvmx_get_app_id(void)
{
if (cvmx_app_id == 0)
cvmx_allocate_app_id();
return cvmx_app_id;
}


@@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for AGL (RGMII) initialization, configuration,
* and monitoring.
*/
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-agl.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-pko-defs.h>
int __cvmx_helper_agl_enumerate(int xiface)
{
if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
if (agl_prtx_ctl.s.mode == 0) /* RGMII */
return 1;
}
return 0;
}
/**
* @INTERNAL
* Convert interface to port to assess CSRs.
*
* @param xiface Interface to probe
* @return The port corresponding to the interface
*/
int cvmx_helper_agl_get_port(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (OCTEON_IS_MODEL(OCTEON_CN70XX))
return xi.interface - 4;
return -1;
}
/**
* @INTERNAL
* Probe a RGMII interface and determine the number of ports
* connected to it. The RGMII interface should still be down
* after this call.
*
* @param interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_agl_probe(int interface)
{
int port = cvmx_helper_agl_get_port(interface);
union cvmx_agl_gmx_bist gmx_bist;
union cvmx_agl_gmx_prtx_cfg gmx_prtx_cfg;
union cvmx_agl_prtx_ctl agl_prtx_ctl;
int result;
result = __cvmx_helper_agl_enumerate(interface);
if (result == 0)
return 0;
/* Check BIST status */
gmx_bist.u64 = csr_rd(CVMX_AGL_GMX_BIST);
if (gmx_bist.u64)
printf("Management port AGL failed BIST (0x%016llx) on AGL%d\n",
CAST64(gmx_bist.u64), port);
/* Disable the external input/output */
gmx_prtx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
gmx_prtx_cfg.s.en = 0;
csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), gmx_prtx_cfg.u64);
/* Set the rgx_ref_clk MUX with AGL_PRTx_CTL[REFCLK_SEL]. Default value
* is 0 (RGMII REFCLK). Recommended to use RGMII RXC(1) or sclk/4 (2)
* to save cost.
*/
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_prtx_ctl.s.clkrst = 0;
agl_prtx_ctl.s.dllrst = 0;
agl_prtx_ctl.s.clktx_byp = 0;
if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
bool tx_enable_bypass;
int tx_delay;
agl_prtx_ctl.s.refclk_sel =
cvmx_helper_get_agl_refclk_sel(interface, port);
agl_prtx_ctl.s.clkrx_set =
cvmx_helper_get_agl_rx_clock_skew(interface, port);
agl_prtx_ctl.s.clkrx_byp =
cvmx_helper_get_agl_rx_clock_delay_bypass(interface,
port);
cvmx_helper_cfg_get_rgmii_tx_clk_delay(
interface, port, &tx_enable_bypass, &tx_delay);
agl_prtx_ctl.s.clktx_byp = tx_enable_bypass;
agl_prtx_ctl.s.clktx_set = tx_delay;
}
csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
/* Force write out before wait */
csr_rd(CVMX_AGL_PRTX_CTL(port));
udelay(500);
/* Enable the compensation controller */
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_prtx_ctl.s.drv_byp = 0;
csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
/* Force write out before wait */
csr_rd(CVMX_AGL_PRTX_CTL(port));
if (!OCTEON_IS_OCTEON3()) {
/* Enable the interface */
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_prtx_ctl.s.enable = 1;
csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
/* Read the value back to force the previous write */
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
}
/* Enable the compensation controller */
agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
agl_prtx_ctl.s.comp = 1;
csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
/* Force write out before wait */
csr_rd(CVMX_AGL_PRTX_CTL(port));
/* Wait for the compensation state to lock. */
udelay(500);
return result;
}
/**
* @INTERNAL
* Bringup and enable a RGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_agl_enable(int interface)
{
int port = cvmx_helper_agl_get_port(interface);
int ipd_port = cvmx_helper_get_ipd_port(interface, port);
union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
union cvmx_pko_reg_read_idx read_idx;
int do_link_set = 1;
int i;
/* Setup PKO for AGL interface. Back pressure is not supported. */
pko_mem_port_ptrs.u64 = 0;
read_idx.u64 = 0;
read_idx.s.inc = 1;
csr_wr(CVMX_PKO_REG_READ_IDX, read_idx.u64);
for (i = 0; i < 40; i++) {
pko_mem_port_ptrs.u64 = csr_rd(CVMX_PKO_MEM_PORT_PTRS);
if (pko_mem_port_ptrs.s.pid == 24) {
pko_mem_port_ptrs.s.eid = 10;
pko_mem_port_ptrs.s.bp_port = 40;
csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
break;
}
}
cvmx_agl_enable(port);
if (do_link_set)
cvmx_agl_link_set(port, cvmx_agl_link_get(ipd_port));
return 0;
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_agl_link_get(int ipd_port)
{
return cvmx_agl_link_get(ipd_port);
}
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_agl_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int port = cvmx_helper_agl_get_port(interface);
return cvmx_agl_link_set(port, link_info);
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Marvell International Ltd.
* Copyright (C) 2020-2022 Marvell International Ltd.
*
* Helper Functions for the Configuration Framework
*/
@@ -100,7 +100,6 @@ static u64 cvmx_cfg_opts[CVMX_HELPER_CFG_OPT_MAX] = {
static int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines allocated */
static int cvmx_pko_queue_alloc(u64 port, int count);
static void cvmx_init_port_cfg(void);
static const int dbg;
int __cvmx_helper_cfg_pknd(int xiface, int index)
{
@@ -184,16 +183,6 @@ int __cvmx_helper_cfg_pko_max_engine(void)
return cvmx_cfg_max_pko_engines;
}
int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
{
if (opt >= CVMX_HELPER_CFG_OPT_MAX)
return -1;
cvmx_cfg_opts[opt] = val;
return 0;
}
uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
{
if (opt >= CVMX_HELPER_CFG_OPT_MAX)
@@ -298,18 +287,6 @@ int cvmx_pko_queue_init_from_cvmx_config_non_pknd(void)
return 0;
}
int cvmx_helper_pko_queue_config_get(int node, cvmx_user_static_pko_queue_config_t *cfg)
{
*cfg = __cvmx_pko_queue_static_config[node];
return 0;
}
int cvmx_helper_pko_queue_config_set(int node, cvmx_user_static_pko_queue_config_t *cfg)
{
__cvmx_pko_queue_static_config[node] = *cfg;
return 0;
}
static int queue_range_init;
int init_cvmx_pko_que_range(void)
@@ -376,91 +353,6 @@ static int cvmx_pko_queue_alloc(u64 port, int count)
return 0;
}
/*
* return the queues for "port"
*
* @param port the port for which the queues are returned
*
* Return: 0 on success
* -1 on failure
*/
int cvmx_pko_queue_free(uint64_t port)
{
int ret_val = -1;
init_cvmx_pko_que_range();
if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
debug("ERROR: %s port=%d > %d", __func__, (int)port,
CVMX_HELPER_CFG_MAX_PKO_QUEUES);
return -1;
}
ret_val = cvmx_free_global_resource_range_with_base(
CVMX_GR_TAG_PKO_QUEUES, cvmx_pko_queue_table[port].ccppp_queue_base,
cvmx_pko_queue_table[port].ccppp_num_queues);
if (ret_val != 0)
return ret_val;
cvmx_pko_queue_table[port].ccppp_num_queues = 0;
cvmx_pko_queue_table[port].ccppp_queue_base = CVMX_HELPER_CFG_INVALID_VALUE;
ret_val = 0;
return ret_val;
}
void cvmx_pko_queue_free_all(void)
{
int i;
for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
if (cvmx_pko_queue_table[i].ccppp_queue_base !=
CVMX_HELPER_CFG_INVALID_VALUE)
cvmx_pko_queue_free(i);
}
void cvmx_pko_queue_show(void)
{
int i;
cvmx_show_global_resource_range(CVMX_GR_TAG_PKO_QUEUES);
for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
if (cvmx_pko_queue_table[i].ccppp_queue_base !=
CVMX_HELPER_CFG_INVALID_VALUE)
debug("port=%d que_base=%d que_num=%d\n", i,
(int)cvmx_pko_queue_table[i].ccppp_queue_base,
(int)cvmx_pko_queue_table[i].ccppp_num_queues);
}
void cvmx_helper_cfg_show_cfg(void)
{
int i, j;
for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
debug("%s: interface%d mode %10s nports%4d\n", __func__, i,
cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
cvmx_helper_interface_enumerate(i));
for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
debug("\tpknd[%i][%d]%d", i, j,
__cvmx_helper_cfg_pknd(i, j));
debug(" pko_port_base[%i][%d]%d", i, j,
__cvmx_helper_cfg_pko_port_base(i, j));
debug(" pko_port_num[%i][%d]%d\n", i, j,
__cvmx_helper_cfg_pko_port_num(i, j));
}
}
for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
if (__cvmx_helper_cfg_pko_queue_base(i) !=
CVMX_HELPER_CFG_INVALID_VALUE) {
debug("%s: pko_port%d qbase%d nqueues%d interface%d index%d\n",
__func__, i, __cvmx_helper_cfg_pko_queue_base(i),
__cvmx_helper_cfg_pko_queue_num(i),
__cvmx_helper_cfg_pko_port_interface(i),
__cvmx_helper_cfg_pko_port_index(i));
}
}
}
/*
* initialize cvmx_cfg_pko_port_map
*/
@@ -515,141 +407,6 @@ void cvmx_helper_cfg_init_pko_port_map(void)
cvmx_cfg_max_pko_engines = pko_eid;
}
void cvmx_helper_cfg_set_jabber_and_frame_max(void)
{
int interface, port;
/*Set the frame max size and jabber size to 65535. */
const unsigned int max_frame = 65535;
// FIXME: should support node argument for remote node init
if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
int ipd_port;
int node = cvmx_get_node_num();
for (interface = 0;
interface < cvmx_helper_get_number_of_interfaces();
interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
int num_ports = cvmx_helper_ports_on_interface(xiface);
// FIXME: should be an easier way to determine
// that an interface is Ethernet/BGX
switch (imode) {
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
case CVMX_HELPER_INTERFACE_MODE_XLAUI:
case CVMX_HELPER_INTERFACE_MODE_XFI:
case CVMX_HELPER_INTERFACE_MODE_10G_KR:
case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
for (port = 0; port < num_ports; port++) {
ipd_port = cvmx_helper_get_ipd_port(xiface, port);
cvmx_pki_set_max_frm_len(ipd_port, max_frame);
cvmx_helper_bgx_set_jabber(xiface, port, max_frame);
}
break;
default:
break;
}
}
} else {
/*Set the frame max size and jabber size to 65535. */
for (interface = 0; interface < cvmx_helper_get_number_of_interfaces();
interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(cvmx_get_node_num(),
interface);
/*
* Set the frame max size and jabber size to 65535, as the defaults
* are too small.
*/
cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
int num_ports = cvmx_helper_ports_on_interface(xiface);
switch (imode) {
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_QSGMII:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
for (port = 0; port < num_ports; port++)
csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
/* Set max and min value for frame check */
cvmx_pip_set_frame_check(interface, -1);
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
/* Set max and min value for frame check */
cvmx_pip_set_frame_check(interface, -1);
for (port = 0; port < num_ports; port++) {
csr_wr(CVMX_GMXX_RXX_FRM_MAX(port, interface), 65535);
csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
}
break;
case CVMX_HELPER_INTERFACE_MODE_ILK:
/* Set max and min value for frame check */
cvmx_pip_set_frame_check(interface, -1);
for (port = 0; port < num_ports; port++) {
int ipd_port = cvmx_helper_get_ipd_port(interface, port);
cvmx_ilk_enable_la_header(ipd_port, 0);
}
break;
case CVMX_HELPER_INTERFACE_MODE_SRIO:
/* Set max and min value for frame check */
cvmx_pip_set_frame_check(interface, -1);
break;
case CVMX_HELPER_INTERFACE_MODE_AGL:
/* Set max and min value for frame check */
cvmx_pip_set_frame_check(interface, -1);
csr_wr(CVMX_AGL_GMX_RXX_FRM_MAX(0), 65535);
csr_wr(CVMX_AGL_GMX_RXX_JABBER(0), 65535);
break;
default:
break;
}
}
}
}
/**
* Enable storing short packets only in the WQE
* unless NO_WPTR is set, which already has the same effect
*/
void cvmx_helper_cfg_store_short_packets_in_wqe(void)
{
int interface, port;
cvmx_ipd_ctl_status_t ipd_ctl_status;
unsigned int dyn_rs = 1;
if (octeon_has_feature(OCTEON_FEATURE_PKI))
return;
/* NO_WPTR combines WQE with 1st MBUF, RS is redundant */
ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
if (ipd_ctl_status.s.no_wptr) {
dyn_rs = 0;
/* Note: consider also setting 'ignrs' when NO_WPTR is set */
}
for (interface = 0; interface < cvmx_helper_get_number_of_interfaces(); interface++) {
int num_ports = cvmx_helper_ports_on_interface(interface);
for (port = 0; port < num_ports; port++) {
cvmx_pip_port_cfg_t port_cfg;
int pknd = port;
if (octeon_has_feature(OCTEON_FEATURE_PKND))
pknd = cvmx_helper_get_pknd(interface, port);
else
pknd = cvmx_helper_get_ipd_port(interface, port);
port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
port_cfg.s.dyn_rs = dyn_rs;
csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
}
}
}
int __cvmx_helper_cfg_pko_port_interface(int pko_port)
{
return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
@ -716,16 +473,6 @@ int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port)
return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_base_port;
}
int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
{
int ipd_y, ipd_x;
ipd_y = IPD2PKO_CACHE_Y(ipd_port);
ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);
return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_nports;
}
/**
* Return the number of queues to be assigned to this pko_port
*
@ -980,8 +727,6 @@ int __cvmx_helper_init_port_valid(void)
rc = __cvmx_helper_parse_bgx_dt(fdt_addr);
if (!rc)
rc = __cvmx_fdt_parse_vsc7224(fdt_addr);
if (!rc)
rc = __cvmx_fdt_parse_avsp5410(fdt_addr);
if (!rc && octeon_has_feature(OCTEON_FEATURE_BGX_XCV))
rc = __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr);
@ -1030,44 +775,6 @@ int __cvmx_helper_init_port_valid(void)
return 0;
}
typedef int (*cvmx_import_config_t)(void);
cvmx_import_config_t cvmx_import_app_config;
int __cvmx_helper_init_port_config_data_local(void)
{
int rv = 0;
int dbg = 0;
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
if (cvmx_import_app_config) {
rv = (*cvmx_import_app_config)();
if (rv != 0) {
debug("failed to import config\n");
return -1;
}
}
cvmx_helper_cfg_init_pko_port_map();
__cvmx_helper_cfg_init_ipd2pko_cache();
} else {
if (cvmx_import_app_config) {
rv = (*cvmx_import_app_config)();
if (rv != 0) {
debug("failed to import config\n");
return -1;
}
}
}
if (dbg) {
cvmx_helper_cfg_show_cfg();
cvmx_pko_queue_show();
}
return rv;
}
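/*
 * Minimal sketch of how an application could hook the import callback
 * declared above; my_app_import_config is hypothetical and not part of
 * this file.
 */
static int my_app_import_config(void)
{
	/* Apply application-specific pknd/pko-port configuration here */
	return 0;
}

static inline void my_app_hook_config(void)
{
	/* Must run before __cvmx_helper_init_port_config_data_local() */
	cvmx_import_app_config = my_app_import_config;
}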
/*
* This call is made from Linux octeon_ethernet driver
* to setup the PKO with a specific queue count and
@ -1077,9 +784,8 @@ int cvmx_pko_alloc_iport_and_queues(int interface, int port, int port_cnt, int q
{
int rv, p, port_start, cnt;
if (dbg)
debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
queue_cnt);
debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
queue_cnt);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
@ -1122,6 +828,7 @@ static void cvmx_init_port_cfg(void)
struct cvmx_srio_port_param *sr;
pcfg = &cvmx_cfg_port[node][i][j];
memset(pcfg, 0, sizeof(*pcfg));
pcfg->port_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
@ -1188,8 +895,7 @@ int __cvmx_helper_init_port_config_data(int node)
int pknd = 0, bpid = 0;
const int use_static_config = 1;
if (dbg)
printf("%s:\n", __func__);
debug("%s:\n", __func__);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
@ -1295,10 +1001,11 @@ int __cvmx_helper_init_port_config_data(int node)
__cvmx_helper_cfg_init_ipd2pko_cache();
}
if (dbg) {
cvmx_helper_cfg_show_cfg();
cvmx_pko_queue_show();
}
#ifdef DEBUG
cvmx_helper_cfg_show_cfg();
cvmx_pko_queue_show();
#endif
return rv;
}
@ -1336,39 +1043,6 @@ int cvmx_helper_get_port_fdt_node_offset(int xiface, int index)
return cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node;
}
/**
* Search for a port based on its FDT node offset
*
* @param of_offset Node offset of port to search for
* @param[out] xiface xinterface of match
* @param[out] index port index of match
*
* Return: 0 if found, -1 if not found
*/
int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index)
{
int iface;
int i;
int node;
struct cvmx_cfg_port_param *pcfg = NULL;
*xiface = -1;
*index = -1;
for (node = 0; node < CVMX_MAX_NODES; node++) {
for (iface = 0; iface < CVMX_HELPER_MAX_IFACE; iface++) {
for (i = 0; i < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; i++) {
pcfg = &cvmx_cfg_port[node][iface][i];
if (pcfg->valid && pcfg->port_fdt_node == of_offset) {
*xiface = cvmx_helper_node_interface_to_xiface(node, iface);
*index = i;
return 0;
}
}
}
}
return -1;
}
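/*
 * Minimal usage sketch (illustrative, not part of this file): look up
 * the port behind a device tree node offset obtained elsewhere.
 */
static inline void example_port_lookup_by_fdt(int of_offset)
{
	int xiface, index;

	if (!cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_offset,
								 &xiface,
								 &index))
		debug("fdt offset %d -> xiface %d, index %d\n",
		      of_offset, xiface, index);
}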
/**
* @INTERNAL
* Store the FDT node offset in the device tree of a phy
@ -1439,23 +1113,6 @@ bool cvmx_helper_get_port_autonegotiation(int xiface, int index)
return !cvmx_cfg_port[xi.node][xi.interface][index].disable_an;
}
/**
* @INTERNAL
* Override default forward error correction for a port
*
* @param xiface node and interface
* @param index port index
* @param enable true to enable fec, false to disable it
*/
void cvmx_helper_set_port_fec(int xiface, int index, bool enable)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].enable_fec = enable;
}
/**
* @INTERNAL
* Returns if forward error correction is enabled or not.
@ -1474,161 +1131,6 @@ bool cvmx_helper_get_port_fec(int xiface, int index)
return cvmx_cfg_port[xi.node][xi.interface][index].enable_fec;
}
/**
* @INTERNAL
* Configure the SRIO RX interface AGC settings for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
 * @param ctle_zero_override true to override the RX equalizer peaking control
 * @param ctle_zero RX equalizer peaking control (default 0x6)
 * @param agc_override true to put AGC in manual mode
 * @param agc_pre_ctle AGC pre-CTLE gain (default 0x5)
 * @param agc_post_ctle AGC post-CTLE gain (default 0x4)
*
* NOTE: This must be called before SRIO is initialized to take effect
*/
void cvmx_helper_set_srio_rx(int xiface, int index, bool long_run, bool ctle_zero_override,
u8 ctle_zero, bool agc_override, uint8_t agc_pre_ctle,
uint8_t agc_post_ctle)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
sr->srio_rx_ctle_zero_override = ctle_zero_override;
sr->srio_rx_ctle_zero = ctle_zero;
sr->srio_rx_ctle_agc_override = agc_override;
sr->srio_rx_agc_pre_ctle = agc_pre_ctle;
sr->srio_rx_agc_post_ctle = agc_post_ctle;
}
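/*
 * Illustrative call (values are the documented defaults): override the
 * CTLE zero and put the AGC in manual mode on lane 0 of the short-run
 * profile.
 */
static inline void example_srio_rx_setup(int xiface)
{
	cvmx_helper_set_srio_rx(xiface, 0, false, true, 0x6, true, 0x5, 0x4);
}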
/**
* @INTERNAL
* Get the SRIO RX interface AGC settings for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
 * @param[out] ctle_zero_override true if the RX equalizer peaking control is overridden
 * @param[out] ctle_zero RX equalizer peaking control (default 0x6)
 * @param[out] agc_override true to put AGC in manual mode
 * @param[out] agc_pre_ctle AGC pre-CTLE gain (default 0x5)
 * @param[out] agc_post_ctle AGC post-CTLE gain (default 0x4)
*/
void cvmx_helper_get_srio_rx(int xiface, int index, bool long_run, bool *ctle_zero_override,
u8 *ctle_zero, bool *agc_override, uint8_t *agc_pre_ctle,
uint8_t *agc_post_ctle)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
if (ctle_zero_override)
*ctle_zero_override = sr->srio_rx_ctle_zero_override;
if (ctle_zero)
*ctle_zero = sr->srio_rx_ctle_zero;
if (agc_override)
*agc_override = sr->srio_rx_ctle_agc_override;
if (agc_pre_ctle)
*agc_pre_ctle = sr->srio_rx_agc_pre_ctle;
if (agc_post_ctle)
*agc_post_ctle = sr->srio_rx_agc_post_ctle;
}
/**
* @INTERNAL
* Configure the SRIO TX interface for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
* @param tx_swing tx swing value to use (default 0x7), -1 to not
* override.
* @param tx_gain PCS SDS TX gain (default 0x3), -1 to not
* override
* @param tx_premptap_override true to override preemphasis control
* @param tx_premptap_pre preemphasis pre tap value (default 0x0)
* @param tx_premptap_post preemphasis post tap value (default 0xF)
* @param tx_vboost vboost enable (1 = enable, -1 = don't override)
* hardware default is 1.
*
* NOTE: This must be called before SRIO is initialized to take effect
*/
void cvmx_helper_set_srio_tx(int xiface, int index, bool long_run, int tx_swing, int tx_gain,
bool tx_premptap_override, uint8_t tx_premptap_pre,
u8 tx_premptap_post, int tx_vboost)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
sr->srio_tx_swing_override = (tx_swing != -1);
sr->srio_tx_swing = tx_swing != -1 ? tx_swing : 0x7;
sr->srio_tx_gain_override = (tx_gain != -1);
sr->srio_tx_gain = tx_gain != -1 ? tx_gain : 0x3;
sr->srio_tx_premptap_override = tx_premptap_override;
sr->srio_tx_premptap_pre = tx_premptap_override ? tx_premptap_pre : 0;
sr->srio_tx_premptap_post = tx_premptap_override ? tx_premptap_post : 0xF;
sr->srio_tx_vboost_override = tx_vboost != -1;
sr->srio_tx_vboost = (tx_vboost != -1) ? tx_vboost : 1;
}
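/*
 * Illustrative call (values are placeholders): override only the TX
 * swing on lane 0 of the long-run profile, leaving gain, preemphasis
 * and vboost at their defaults via the -1 / false "don't override"
 * arguments.
 */
static inline void example_srio_tx_setup(int xiface)
{
	cvmx_helper_set_srio_tx(xiface, 0, true, 0x8, -1, false, 0, 0, -1);
}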
/**
* @INTERNAL
* Get the SRIO TX interface settings for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
* @param[out] tx_swing_override true to override pcs_sds_txX_swing
* @param[out] tx_swing tx swing value to use (default 0x7)
* @param[out] tx_gain_override true to override default gain
* @param[out] tx_gain PCS SDS TX gain (default 0x3)
* @param[out] tx_premptap_override true to override preemphasis control
* @param[out] tx_premptap_pre preemphasis pre tap value (default 0x0)
* @param[out] tx_premptap_post preemphasis post tap value (default 0xF)
* @param[out] tx_vboost_override override vboost setting
* @param[out] tx_vboost vboost enable (default true)
*/
void cvmx_helper_get_srio_tx(int xiface, int index, bool long_run, bool *tx_swing_override,
u8 *tx_swing, bool *tx_gain_override, uint8_t *tx_gain,
bool *tx_premptap_override, uint8_t *tx_premptap_pre,
u8 *tx_premptap_post, bool *tx_vboost_override, bool *tx_vboost)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
if (tx_swing_override)
*tx_swing_override = sr->srio_tx_swing_override;
if (tx_swing)
*tx_swing = sr->srio_tx_swing;
if (tx_gain_override)
*tx_gain_override = sr->srio_tx_gain_override;
if (tx_gain)
*tx_gain = sr->srio_tx_gain;
if (tx_premptap_override)
*tx_premptap_override = sr->srio_tx_premptap_override;
if (tx_premptap_pre)
*tx_premptap_pre = sr->srio_tx_premptap_pre;
if (tx_premptap_post)
*tx_premptap_post = sr->srio_tx_premptap_post;
if (tx_vboost_override)
*tx_vboost_override = sr->srio_tx_vboost_override;
if (tx_vboost)
*tx_vboost = sr->srio_tx_vboost;
}
/**
* @INTERNAL
* Sets the PHY info data structure
@ -1683,23 +1185,6 @@ struct cvmx_phy_gpio_leds *cvmx_helper_get_port_phy_leds(int xiface, int index)
return cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds;
}
/**
* @INTERNAL
* Sets a pointer to the PHY LED configuration (if local GPIOs drive them)
*
* @param xiface node and interface
 * @param index port index
* @param leds pointer to led data structure
*/
void cvmx_helper_set_port_phy_leds(int xiface, int index, struct cvmx_phy_gpio_leds *leds)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds = leds;
}
/**
* @INTERNAL
* Disables RGMII TX clock bypass and sets delay value
@ -1743,59 +1228,6 @@ void cvmx_helper_cfg_get_rgmii_tx_clk_delay(int xiface, int index, bool *bypass,
*clk_delay = cvmx_cfg_port[xi.node][xi.interface][index].rgmii_tx_clk_delay;
}
/**
* @INTERNAL
* Retrieve the SFP node offset in the device tree
*
* @param xiface node and interface
* @param index port index
*
* Return: offset in device tree or -1 if error or not defined.
*/
int cvmx_helper_cfg_get_sfp_fdt_offset(int xiface, int index)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
return cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset;
}
/**
* @INTERNAL
* Sets the SFP node offset
*
* @param xiface node and interface
* @param index port index
* @param sfp_of_offset Offset of SFP node in device tree
*/
void cvmx_helper_cfg_set_sfp_fdt_offset(int xiface, int index, int sfp_of_offset)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset = sfp_of_offset;
}
/**
* Get data structure defining the Microsemi VSC7224 channel info
* or NULL if not present
*
* @param xiface node and interface
* @param index port index
*
* Return: pointer to vsc7224 data structure or NULL if not present
*/
struct cvmx_vsc7224_chan *cvmx_helper_cfg_get_vsc7224_chan_info(int xiface, int index)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
return cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan;
}
/**
* Sets the Microsemi VSC7224 channel info data structure
*
@ -1813,40 +1245,6 @@ void cvmx_helper_cfg_set_vsc7224_chan_info(int xiface, int index,
cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan = vsc7224_chan_info;
}
/**
* Get data structure defining the Avago AVSP5410 phy info
* or NULL if not present
*
* @param xiface node and interface
* @param index port index
*
* Return: pointer to avsp5410 data structure or NULL if not present
*/
struct cvmx_avsp5410 *cvmx_helper_cfg_get_avsp5410_info(int xiface, int index)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
return cvmx_cfg_port[xi.node][xi.interface][index].avsp5410;
}
/**
* Sets the Avago AVSP5410 phy info data structure
*
* @param xiface node and interface
* @param index port index
* @param[in] avsp5410_info Avago AVSP5410 data structure
*/
void cvmx_helper_cfg_set_avsp5410_info(int xiface, int index, struct cvmx_avsp5410 *avsp5410_info)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].avsp5410 = avsp5410_info;
}
/**
* Gets the SFP data associated with a port
*
@ -1879,36 +1277,3 @@ void cvmx_helper_cfg_set_sfp_info(int xiface, int index, struct cvmx_fdt_sfp_inf
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].sfp_info = sfp_info;
}
/**
* Returns a pointer to the phy device associated with a port
*
* @param xiface node and interface
* @param index port index
*
* return pointer to phy device or NULL if none
*/
struct phy_device *cvmx_helper_cfg_get_phy_device(int xiface, int index)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
return cvmx_cfg_port[xi.node][xi.interface][index].phydev;
}
/**
* Sets the phy device associated with a port
*
* @param xiface node and interface
* @param index port index
 * @param[in] phydev phy device to associate
*/
void cvmx_helper_cfg_set_phy_device(int xiface, int index, struct phy_device *phydev)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].phydev = phydev;
}

File diff suppressed because it is too large

View file

@ -0,0 +1,76 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Helper functions for FPA setup.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-pip.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-helper-pko.h>
/**
* @INTERNAL
* OBSOLETE
*
* Allocate memory for and initialize a single FPA pool.
*
* @param pool Pool to initialize
* @param buffer_size Size of buffers to allocate in bytes
* @param buffers Number of buffers to put in the pool. Zero is allowed
* @param name String name of the pool for debugging purposes
* @return Zero on success, non-zero on failure
*
* This function is only for transition, will be removed.
*/
int __cvmx_helper_initialize_fpa_pool(int pool, u64 buffer_size, u64 buffers,
const char *name)
{
return cvmx_fpa_setup_pool(pool, name, NULL, buffer_size, buffers);
}

View file

@ -0,0 +1,902 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for ILK initialization, configuration,
* and monitoring.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
int __cvmx_helper_ilk_enumerate(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
xi.interface -= CVMX_ILK_GBL_BASE();
return cvmx_ilk_chans[xi.node][xi.interface];
}
/**
* @INTERNAL
* Initialize all tx calendar entries to the xoff state.
* Initialize all rx calendar entries to the xon state. The rx calendar entries
* must be in the xon state to allow new pko pipe assignments. If a calendar
* entry is assigned a different pko pipe while in the xoff state, the old pko
* pipe will stay in the xoff state even when no longer used by ilk.
*
 * @param intf Interface whose calendars are to be initialized.
*/
static void __cvmx_ilk_clear_cal_cn78xx(int intf)
{
cvmx_ilk_txx_cal_entryx_t tx_entry;
cvmx_ilk_rxx_cal_entryx_t rx_entry;
int i;
int node = (intf >> 4) & 0xf;
int interface = (intf & 0xf);
/* Initialize all tx calendar entries to off */
tx_entry.u64 = 0;
tx_entry.s.ctl = XOFF;
for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(i, interface),
tx_entry.u64);
}
/* Initialize all rx calendar entries to on */
rx_entry.u64 = 0;
rx_entry.s.ctl = XOFF;
for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(i, interface),
rx_entry.u64);
}
}
/**
* @INTERNAL
* Initialize all tx calendar entries to the xoff state.
* Initialize all rx calendar entries to the xon state. The rx calendar entries
* must be in the xon state to allow new pko pipe assignments. If a calendar
* entry is assigned a different pko pipe while in the xoff state, the old pko
* pipe will stay in the xoff state even when no longer used by ilk.
*
 * @param interface whose calendars are to be initialized.
*/
static void __cvmx_ilk_clear_cal_cn68xx(int interface)
{
cvmx_ilk_txx_idx_cal_t tx_idx;
cvmx_ilk_txx_mem_cal0_t tx_cal0;
cvmx_ilk_txx_mem_cal1_t tx_cal1;
cvmx_ilk_rxx_idx_cal_t rx_idx;
cvmx_ilk_rxx_mem_cal0_t rx_cal0;
cvmx_ilk_rxx_mem_cal1_t rx_cal1;
int i;
/*
* First we initialize the tx calendar starting from entry 0,
* incrementing the entry with every write.
*/
tx_idx.u64 = 0;
tx_idx.s.inc = 1;
csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);
/* Set state to xoff for all entries */
tx_cal0.u64 = 0;
tx_cal0.s.entry_ctl0 = XOFF;
tx_cal0.s.entry_ctl1 = XOFF;
tx_cal0.s.entry_ctl2 = XOFF;
tx_cal0.s.entry_ctl3 = XOFF;
tx_cal1.u64 = 0;
tx_cal1.s.entry_ctl4 = XOFF;
tx_cal1.s.entry_ctl5 = XOFF;
tx_cal1.s.entry_ctl6 = XOFF;
tx_cal1.s.entry_ctl7 = XOFF;
/* Write all 288 entries */
for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
}
/*
* Next we initialize the rx calendar starting from entry 0,
* incrementing the entry with every write.
*/
rx_idx.u64 = 0;
rx_idx.s.inc = 1;
csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);
/* Set state to xon for all entries */
rx_cal0.u64 = 0;
rx_cal0.s.entry_ctl0 = XON;
rx_cal0.s.entry_ctl1 = XON;
rx_cal0.s.entry_ctl2 = XON;
rx_cal0.s.entry_ctl3 = XON;
rx_cal1.u64 = 0;
rx_cal1.s.entry_ctl4 = XON;
rx_cal1.s.entry_ctl5 = XON;
rx_cal1.s.entry_ctl6 = XON;
rx_cal1.s.entry_ctl7 = XON;
/* Write all 288 entries */
for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
}
}
/**
* @INTERNAL
* Initialize all calendar entries.
*
* @param interface whose calendar is to be initialized.
*/
void __cvmx_ilk_clear_cal(int interface)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
__cvmx_ilk_clear_cal_cn68xx(interface);
else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
__cvmx_ilk_clear_cal_cn78xx(interface);
}
void __cvmx_ilk_write_tx_cal_entry_cn68xx(int interface, int channel,
unsigned char bpid)
{
cvmx_ilk_txx_idx_cal_t tx_idx;
cvmx_ilk_txx_mem_cal0_t tx_cal0;
cvmx_ilk_txx_mem_cal1_t tx_cal1;
int entry;
int window;
int window_entry;
/*
* The calendar has 288 entries. Each calendar entry represents a
* channel's flow control state or the link flow control state.
* Starting with the first entry, every sixteenth entry is used for the
* link flow control state. The other 15 entries are used for the
 * channels' flow control state:
* entry 0 ----> link flow control state
* entry 1 ----> channel 0 flow control state
* entry 2 ----> channel 1 flow control state
* ...
* entry 15 ----> channel 14 flow control state
* entry 16 ----> link flow control state
* entry 17 ----> channel 15 flow control state
*
* Also, the calendar is accessed via windows into it. Each window maps
* to 8 entries.
*/
entry = 1 + channel + (channel / 15);
window = entry / 8;
window_entry = entry % 8;
/* Indicate the window we need to access */
tx_idx.u64 = 0;
tx_idx.s.index = window;
csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);
/* Get the window's current value */
tx_cal0.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL0(interface));
tx_cal1.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL1(interface));
/* Force every sixteenth entry as link flow control state */
if ((window & 1) == 0)
tx_cal0.s.entry_ctl0 = LINK;
/* Update the entry */
switch (window_entry) {
case 0:
tx_cal0.s.entry_ctl0 = 0;
tx_cal0.s.bpid0 = bpid;
break;
case 1:
tx_cal0.s.entry_ctl1 = 0;
tx_cal0.s.bpid1 = bpid;
break;
case 2:
tx_cal0.s.entry_ctl2 = 0;
tx_cal0.s.bpid2 = bpid;
break;
case 3:
tx_cal0.s.entry_ctl3 = 0;
tx_cal0.s.bpid3 = bpid;
break;
case 4:
tx_cal1.s.entry_ctl4 = 0;
tx_cal1.s.bpid4 = bpid;
break;
case 5:
tx_cal1.s.entry_ctl5 = 0;
tx_cal1.s.bpid5 = bpid;
break;
case 6:
tx_cal1.s.entry_ctl6 = 0;
tx_cal1.s.bpid6 = bpid;
break;
case 7:
tx_cal1.s.entry_ctl7 = 0;
tx_cal1.s.bpid7 = bpid;
break;
}
/* Write the window's new value */
csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
}
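/*
 * Worked example of the calendar index math above (illustrative only):
 * channel 20 sits past the second link entry, so it lands in window 2,
 * slot 6.
 */
static inline void example_cn68xx_cal_index(void)
{
	int channel = 20;
	int entry = 1 + channel + (channel / 15);	/* 22 */
	int window = entry / 8;				/* 2 */
	int window_entry = entry % 8;			/* 6 */

	(void)window;
	(void)window_entry;
}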
void __cvmx_ilk_write_tx_cal_entry_cn78xx(int intf, int channel,
unsigned char bpid)
{
cvmx_ilk_txx_cal_entryx_t tx_cal;
int calender_16_block = channel / 15;
int calender_16_index = channel % 15 + 1;
int index = calender_16_block * 16 + calender_16_index;
int node = (intf >> 4) & 0xf;
int interface = intf & 0xf;
/* Program the link status on first channel */
if (calender_16_index == 1) {
tx_cal.u64 = 0;
tx_cal.s.ctl = 1;
csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index - 1, interface),
tx_cal.u64);
}
tx_cal.u64 = 0;
tx_cal.s.ctl = 0;
tx_cal.s.channel = channel;
csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index, interface),
tx_cal.u64);
}
/**
* @INTERNAL
* Setup the channel's tx calendar entry.
*
* @param interface channel belongs to
* @param channel whose calendar entry is to be updated
* @param bpid assigned to the channel
*/
void __cvmx_ilk_write_tx_cal_entry(int interface, int channel,
unsigned char bpid)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
__cvmx_ilk_write_tx_cal_entry_cn68xx(interface, channel, bpid);
else
__cvmx_ilk_write_tx_cal_entry_cn78xx(interface, channel, bpid);
}
void __cvmx_ilk_write_rx_cal_entry_cn78xx(int intf, int channel,
unsigned char bpid)
{
cvmx_ilk_rxx_cal_entryx_t rx_cal;
int calender_16_block = channel / 15;
int calender_16_index = channel % 15 + 1;
int index = calender_16_block * 16 + calender_16_index;
int node = (intf >> 4) & 0xf;
int interface = intf & 0xf;
/* Program the link status on first channel */
if (calender_16_index == 1) {
rx_cal.u64 = 0;
rx_cal.s.ctl = 1;
csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index - 1, interface),
rx_cal.u64);
}
rx_cal.u64 = 0;
rx_cal.s.ctl = 0;
rx_cal.s.channel = channel;
csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index, interface),
rx_cal.u64);
}
void __cvmx_ilk_write_rx_cal_entry_cn68xx(int interface, int channel,
unsigned char pipe)
{
cvmx_ilk_rxx_idx_cal_t rx_idx;
cvmx_ilk_rxx_mem_cal0_t rx_cal0;
cvmx_ilk_rxx_mem_cal1_t rx_cal1;
int entry;
int window;
int window_entry;
/*
* The calendar has 288 entries. Each calendar entry represents a
* channel's flow control state or the link flow control state.
* Starting with the first entry, every sixteenth entry is used for the
* link flow control state. The other 15 entries are used for the
 * channels' flow control state:
* entry 0 ----> link flow control state
* entry 1 ----> channel 0 flow control state
* entry 2 ----> channel 1 flow control state
* ...
* entry 15 ----> channel 14 flow control state
* entry 16 ----> link flow control state
* entry 17 ----> channel 15 flow control state
*
* Also, the calendar is accessed via windows into it. Each window maps
* to 8 entries.
*/
entry = 1 + channel + (channel / 15);
window = entry / 8;
window_entry = entry % 8;
/* Indicate the window we need to access */
rx_idx.u64 = 0;
rx_idx.s.index = window;
csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);
/* Get the window's current value */
rx_cal0.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL0(interface));
rx_cal1.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL1(interface));
/* Force every sixteenth entry as link flow control state */
if ((window & 1) == 0)
rx_cal0.s.entry_ctl0 = LINK;
/* Update the entry */
switch (window_entry) {
case 0:
rx_cal0.s.entry_ctl0 = 0;
rx_cal0.s.port_pipe0 = pipe;
break;
case 1:
rx_cal0.s.entry_ctl1 = 0;
rx_cal0.s.port_pipe1 = pipe;
break;
case 2:
rx_cal0.s.entry_ctl2 = 0;
rx_cal0.s.port_pipe2 = pipe;
break;
case 3:
rx_cal0.s.entry_ctl3 = 0;
rx_cal0.s.port_pipe3 = pipe;
break;
case 4:
rx_cal1.s.entry_ctl4 = 0;
rx_cal1.s.port_pipe4 = pipe;
break;
case 5:
rx_cal1.s.entry_ctl5 = 0;
rx_cal1.s.port_pipe5 = pipe;
break;
case 6:
rx_cal1.s.entry_ctl6 = 0;
rx_cal1.s.port_pipe6 = pipe;
break;
case 7:
rx_cal1.s.entry_ctl7 = 0;
rx_cal1.s.port_pipe7 = pipe;
break;
}
/* Write the window's new value */
csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
}
/**
* @INTERNAL
* Setup the channel's rx calendar entry.
*
* @param interface channel belongs to
* @param channel whose calendar entry is to be updated
* @param pipe PKO assigned to the channel
*/
void __cvmx_ilk_write_rx_cal_entry(int interface, int channel,
unsigned char pipe)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
__cvmx_ilk_write_rx_cal_entry_cn68xx(interface, channel, pipe);
else
__cvmx_ilk_write_rx_cal_entry_cn78xx(interface, channel, pipe);
}
/**
* @INTERNAL
* Probe a ILK interface and determine the number of ports
* connected to it. The ILK interface should still be down
* after this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_ilk_probe(int xiface)
{
int res = 0;
int interface;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!octeon_has_feature(OCTEON_FEATURE_ILK))
return res;
interface = xi.interface - CVMX_ILK_GBL_BASE();
if (interface >= CVMX_NUM_ILK_INTF)
return 0;
/* the configuration should be done only once */
if (cvmx_ilk_get_intf_ena(xiface))
return cvmx_ilk_chans[xi.node][interface];
/* configure lanes and enable the link */
res = cvmx_ilk_start_interface(((xi.node << 4) | interface),
cvmx_ilk_lane_mask[xi.node][interface]);
if (res < 0)
return 0;
res = __cvmx_helper_ilk_enumerate(xiface);
return res;
}
static int __cvmx_helper_ilk_init_port_cn68xx(int xiface)
{
int i, j, res = -1;
static int pipe_base = 0, pknd_base;
static cvmx_ilk_pipe_chan_t *pch = NULL, *tmp;
static cvmx_ilk_chan_pknd_t *chpknd = NULL, *tmp1;
static cvmx_ilk_cal_entry_t *calent = NULL, *tmp2;
int enable_rx_cal = 1;
int interface;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int intf;
int num_chans;
interface = xi.interface - CVMX_ILK_GBL_BASE();
intf = (xi.node << 4) | interface;
if (interface >= CVMX_NUM_ILK_INTF)
return 0;
num_chans = cvmx_ilk_chans[0][interface];
/* set up channel to pkind mapping */
if (pknd_base == 0)
pknd_base = cvmx_helper_get_pknd(xiface, 0);
/* set up the group of pipes available to ilk */
if (pipe_base == 0)
pipe_base =
__cvmx_pko_get_pipe(interface + CVMX_ILK_GBL_BASE(), 0);
if (pipe_base == -1) {
pipe_base = 0;
return 0;
}
res = cvmx_ilk_set_pipe(xiface, pipe_base,
cvmx_ilk_chans[0][interface]);
if (res < 0)
return 0;
/* set up pipe to channel mapping */
i = pipe_base;
if (!pch) {
pch = (cvmx_ilk_pipe_chan_t *)cvmx_bootmem_alloc(
num_chans * sizeof(cvmx_ilk_pipe_chan_t),
sizeof(cvmx_ilk_pipe_chan_t));
if (!pch)
return 0;
}
memset(pch, 0, num_chans * sizeof(cvmx_ilk_pipe_chan_t));
tmp = pch;
for (j = 0; j < num_chans; j++) {
tmp->pipe = i++;
tmp->chan = j;
tmp++;
}
res = cvmx_ilk_tx_set_channel(interface, pch,
cvmx_ilk_chans[0][interface]);
if (res < 0) {
res = 0;
goto err_free_pch;
}
pipe_base += cvmx_ilk_chans[0][interface];
i = pknd_base;
if (!chpknd) {
chpknd = (cvmx_ilk_chan_pknd_t *)cvmx_bootmem_alloc(
CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
sizeof(cvmx_ilk_chan_pknd_t));
if (!chpknd) {
pipe_base -= cvmx_ilk_chans[xi.node][interface];
res = 0;
goto err_free_pch;
}
}
memset(chpknd, 0, CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
tmp1 = chpknd;
for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
tmp1->chan = j;
tmp1->pknd = i++;
tmp1++;
}
res = cvmx_ilk_rx_set_pknd(xiface, chpknd,
cvmx_ilk_chans[xi.node][interface]);
if (res < 0) {
pipe_base -= cvmx_ilk_chans[xi.node][interface];
res = 0;
goto err_free_chpknd;
}
pknd_base += cvmx_ilk_chans[xi.node][interface];
/* Set up tx calendar */
if (!calent) {
calent = (cvmx_ilk_cal_entry_t *)cvmx_bootmem_alloc(
CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t),
sizeof(cvmx_ilk_cal_entry_t));
if (!calent) {
pipe_base -= cvmx_ilk_chans[xi.node][interface];
pknd_base -= cvmx_ilk_chans[xi.node][interface];
res = 0;
goto err_free_chpknd;
}
}
memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
tmp1 = chpknd;
tmp2 = calent;
for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
tmp2->pipe_bpid = tmp1->pknd;
tmp2->ent_ctrl = PIPE_BPID;
tmp1++;
tmp2++;
}
res = cvmx_ilk_cal_setup_tx(intf, cvmx_ilk_chans[xi.node][interface],
calent, 1);
if (res < 0) {
pipe_base -= cvmx_ilk_chans[xi.node][interface];
pknd_base -= cvmx_ilk_chans[xi.node][interface];
res = 0;
goto err_free_calent;
}
/* set up rx calendar. allocated memory can be reused.
* this is because max pkind is always less than max pipe
*/
memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
tmp = pch;
tmp2 = calent;
for (j = 0; j < cvmx_ilk_chans[0][interface]; j++) {
tmp2->pipe_bpid = tmp->pipe;
tmp2->ent_ctrl = PIPE_BPID;
tmp++;
tmp2++;
}
if (cvmx_ilk_use_la_mode(interface, 0))
enable_rx_cal = cvmx_ilk_la_mode_enable_rx_calendar(interface);
else
enable_rx_cal = 1;
res = cvmx_ilk_cal_setup_rx(intf, cvmx_ilk_chans[xi.node][interface],
calent, CVMX_ILK_RX_FIFO_WM, enable_rx_cal);
if (res < 0) {
pipe_base -= cvmx_ilk_chans[xi.node][interface];
pknd_base -= cvmx_ilk_chans[xi.node][interface];
res = 0;
goto err_free_calent;
}
goto out;
err_free_calent:
/* no free() for cvmx_bootmem_alloc() */
err_free_chpknd:
/* no free() for cvmx_bootmem_alloc() */
err_free_pch:
/* no free() for cvmx_bootmem_alloc() */
out:
return res;
}
static int __cvmx_helper_ilk_init_port_cn78xx(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface;
int intf;
interface = xi.interface - CVMX_ILK_GBL_BASE();
intf = (xi.node << 4) | interface;
if (interface >= CVMX_NUM_ILK_INTF)
return 0;
if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
struct cvmx_pki_style_config style_cfg;
int num_channels = cvmx_ilk_chans[xi.node][interface];
int index, i;
for (i = 0; i < num_channels; i++) {
int pknd;
index = (i % 8);
/* Set jabber to allow max sized packets */
if (i == 0)
csr_wr_node(xi.node,
CVMX_ILK_RXX_JABBER(interface),
0xfff8);
/* Setup PKND */
pknd = cvmx_helper_get_pknd(xiface, index);
csr_wr_node(xi.node, CVMX_ILK_RXX_CHAX(i, interface),
pknd);
cvmx_pki_read_style_config(
0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
style_cfg.parm_cfg.qpg_port_sh = 0;
/* 256 channels */
style_cfg.parm_cfg.qpg_port_msb = 8;
cvmx_pki_write_style_config(
0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
}
cvmx_ilk_cal_setup_tx(intf, num_channels, NULL, 1);
cvmx_ilk_cal_setup_rx(intf, num_channels, NULL,
CVMX_ILK_RX_FIFO_WM, 1);
}
return 0;
}
static int __cvmx_helper_ilk_init_port(int xiface)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return __cvmx_helper_ilk_init_port_cn68xx(xiface);
else
return __cvmx_helper_ilk_init_port_cn78xx(xiface);
}
/**
* @INTERNAL
 * Bring up and enable the ILK interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_ilk_enable(int xiface)
{
if (__cvmx_helper_ilk_init_port(xiface) < 0)
return -1;
return cvmx_ilk_enable(xiface);
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by ILK link status.
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
int xiface = cvmx_helper_get_interface_num(ipd_port);
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface;
int retry_count = 0;
cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
cvmx_ilk_rxx_int_t ilk_rxx_int;
int lane_mask = 0;
int i;
int node = xi.node;
result.u64 = 0;
interface = xi.interface - CVMX_ILK_GBL_BASE();
retry:
retry_count++;
if (retry_count > 200)
goto fail;
/* Read RX config and status bits */
ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0) {
/* (GSER-21957) GSER RX Equalization may make >= 5gbaud non-KR
* channel better
*/
if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
int qlm, lane_mask;
for (qlm = 4; qlm < 8; qlm++) {
lane_mask = 1 << (qlm - 4) * 4;
if (lane_mask &
cvmx_ilk_lane_mask[node][interface]) {
if (__cvmx_qlm_rx_equalization(
node, qlm, -1))
goto retry;
}
}
}
/* Clear the boundary lock status bit */
ilk_rxx_int.u64 = 0;
ilk_rxx_int.s.word_sync_done = 1;
csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
/* We need to start looking for word boundary lock */
ilk_rxx_cfg1.s.rx_bdry_lock_ena =
cvmx_ilk_lane_mask[node][interface];
ilk_rxx_cfg1.s.rx_align_ena = 0;
csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
ilk_rxx_cfg1.u64);
//debug("ILK%d: Looking for word boundary lock\n", interface);
udelay(50);
goto retry;
}
if (ilk_rxx_cfg1.s.rx_align_ena == 0) {
if (ilk_rxx_int.s.word_sync_done) {
/* Clear the lane align status bits */
ilk_rxx_int.u64 = 0;
ilk_rxx_int.s.lane_align_fail = 1;
ilk_rxx_int.s.lane_align_done = 1;
csr_wr_node(node, CVMX_ILK_RXX_INT(interface),
ilk_rxx_int.u64);
ilk_rxx_cfg1.s.rx_align_ena = 1;
csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
ilk_rxx_cfg1.u64);
//printf("ILK%d: Looking for lane alignment\n", interface);
}
udelay(50);
goto retry;
}
if (ilk_rxx_int.s.lane_align_fail) {
ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
ilk_rxx_cfg1.s.rx_align_ena = 0;
csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
ilk_rxx_cfg1.u64);
//debug("ILK%d: Lane alignment failed\n", interface);
goto fail;
}
lane_mask = ilk_rxx_cfg1.s.rx_bdry_lock_ena;
if (ilk_rxx_cfg1.s.pkt_ena == 0 && ilk_rxx_int.s.lane_align_done) {
cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
ilk_txx_cfg1.u64 =
csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
ilk_rxx_cfg1.u64 =
csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
ilk_rxx_cfg1.u64);
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
/*
* Enable rxf_ctl_perr, rxf_lnk0_perr, rxf_lnk1_perr,
* pop_empty, push_full.
*/
csr_wr(CVMX_ILK_GBL_INT_EN, 0x1f);
/* Enable bad_pipe, bad_seq, txf_err */
csr_wr(CVMX_ILK_TXX_INT_EN(interface), 0x7);
/*
* Enable crc24_err, lane_bad_word,
* pkt_drop_{rid,rxf,sop}
*/
csr_wr(CVMX_ILK_RXX_INT_EN(interface), 0x1e2);
}
/* Need to enable ILK interrupts for 78xx */
for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
if ((1 << i) & lane_mask) {
/* clear pending interrupts, before enabling. */
csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
0x1ff);
/* Enable bad_64b67b, bdry_sync_loss, crc32_err,
* dskew_fifo_ovfl, scrm_sync_loss,
* serdes_lock_loss, stat_msg, ukwn_cntl_word
*/
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
0x1ff);
}
}
//debug("ILK%d: Lane alignment complete\n", interface);
}
/* Enable error interrupts, now link is up */
cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK,
node | (interface << 2) | (lane_mask << 4));
result.s.link_up = 1;
result.s.full_duplex = 1;
if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
int qlm = cvmx_qlm_lmac(xiface, 0);
result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 64 / 67;
} else {
result.s.speed =
cvmx_qlm_get_gbaud_mhz(1 + interface) * 64 / 67;
}
result.s.speed *= cvmx_pop(lane_mask);
return result;
fail:
if (ilk_rxx_cfg1.s.pkt_ena) {
/* Disable the interface */
ilk_rxx_cfg1.s.pkt_ena = 0;
csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
ilk_rxx_cfg1.u64);
/* Disable error interrupts */
for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
/* Disable bad_64b67b, bdry_sync_loss, crc32_err,
* dskew_fifo_ovfl, scrm_sync_loss, serdes_lock_loss,
* stat_msg, ukwn_cntl_word
*/
if ((1 << i) & lane_mask) {
csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
0x1ff);
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
~0x1ff);
}
}
/* Disable error interrupts */
cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK, 0);
}
return result;
}
/**
* @INTERNAL
* Set the link state of an IPD/PKO port.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
/* Do nothing */
return 0;
}

View file

@ -0,0 +1,286 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* IPD helper functions.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-pip.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
/**
 * Allocate the packet and WQE pools and set up the FPA hardware
 */
int __cvmx_helper_ipd_setup_fpa_pools(void)
{
cvmx_fpa_global_initialize();
if (cvmx_ipd_cfg.packet_pool.buffer_count == 0)
return 0;
__cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.packet_pool.pool_num,
cvmx_ipd_cfg.packet_pool.buffer_size,
cvmx_ipd_cfg.packet_pool.buffer_count,
"Packet Buffers");
if (cvmx_ipd_cfg.wqe_pool.buffer_count == 0)
return 0;
__cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.wqe_pool.pool_num,
cvmx_ipd_cfg.wqe_pool.buffer_size,
cvmx_ipd_cfg.wqe_pool.buffer_count,
"WQE Buffers");
return 0;
}
/**
* @INTERNAL
* Setup global setting for IPD/PIP not related to a specific
* interface or port. This must be called before IPD is enabled.
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_ipd_global_setup(void)
{
/* Setup the packet and wqe pools*/
__cvmx_helper_ipd_setup_fpa_pools();
/* Setup the global packet input options */
cvmx_ipd_config(cvmx_ipd_cfg.packet_pool.buffer_size / 8,
cvmx_ipd_cfg.first_mbuf_skip / 8,
cvmx_ipd_cfg.not_first_mbuf_skip / 8,
/* The +8 is to account for the next ptr */
(cvmx_ipd_cfg.first_mbuf_skip + 8) / 128,
/* The +8 is to account for the next ptr */
(cvmx_ipd_cfg.not_first_mbuf_skip + 8) / 128,
cvmx_ipd_cfg.wqe_pool.pool_num,
(cvmx_ipd_mode_t)(cvmx_ipd_cfg.cache_mode), 1);
return 0;
}
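/*
 * Worked example of the skip conversions above, assuming a
 * first_mbuf_skip of 184 bytes (the value is illustrative only):
 */
static inline void example_ipd_skip_math(void)
{
	unsigned int first_mbuf_skip = 184;
	unsigned int skip_words = first_mbuf_skip / 8;		/* 23 8-byte words */
	unsigned int back = (first_mbuf_skip + 8) / 128;	/* 1 (128-byte units, +8 for the next ptr) */

	(void)skip_words;
	(void)back;
}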
/**
* Enable or disable FCS stripping for all the ports on an interface.
*
 * @param xiface node and interface
* @param nports number of ports
* @param has_fcs 0 for disable and !0 for enable
*/
static int cvmx_helper_fcs_op(int xiface, int nports, int has_fcs)
{
u64 port_bit;
int index;
int pknd;
union cvmx_pip_sub_pkind_fcsx pkind_fcsx;
union cvmx_pip_prt_cfgx port_cfg;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (!octeon_has_feature(OCTEON_FEATURE_PKND))
return 0;
if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
cvmx_helper_pki_set_fcs_op(xi.node, xi.interface, nports,
has_fcs);
return 0;
}
port_bit = 0;
for (index = 0; index < nports; index++)
port_bit |= ((u64)1 << cvmx_helper_get_pknd(xiface, index));
pkind_fcsx.u64 = csr_rd(CVMX_PIP_SUB_PKIND_FCSX(0));
if (has_fcs)
pkind_fcsx.s.port_bit |= port_bit;
else
pkind_fcsx.s.port_bit &= ~port_bit;
csr_wr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);
for (pknd = 0; pknd < 64; pknd++) {
if ((1ull << pknd) & port_bit) {
port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
}
}
return 0;
}
/**
* @INTERNAL
* Configure the IPD/PIP tagging and QoS options for a specific
* port. This function determines the POW work queue entry
* contents for a port. The setup performed here is controlled by
* the defines in executive-config.h.
*
* @param ipd_port Port/Port kind to configure. This follows the IPD numbering,
* not the per interface numbering
*
* @return Zero on success, negative on failure
*/
static int __cvmx_helper_ipd_port_setup(int ipd_port)
{
union cvmx_pip_prt_cfgx port_config;
union cvmx_pip_prt_tagx tag_config;
if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
int xiface, index, pknd;
union cvmx_pip_prt_cfgbx prt_cfgbx;
xiface = cvmx_helper_get_interface_num(ipd_port);
index = cvmx_helper_get_interface_index_num(ipd_port);
pknd = cvmx_helper_get_pknd(xiface, index);
port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(pknd));
port_config.s.qos = pknd & 0x7;
/* Default BPID to use for packets on this port-kind */
prt_cfgbx.u64 = csr_rd(CVMX_PIP_PRT_CFGBX(pknd));
prt_cfgbx.s.bpid = pknd;
csr_wr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
} else {
port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(ipd_port));
/* Have each port go to a different POW queue */
port_config.s.qos = ipd_port & 0x7;
}
/* Process the headers and place the IP header in the work queue */
port_config.s.mode =
(cvmx_pip_port_parse_mode_t)cvmx_ipd_cfg.port_config.parse_mode;
tag_config.s.ip6_src_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_ip;
tag_config.s.ip6_dst_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_ip;
tag_config.s.ip6_sprt_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_port;
tag_config.s.ip6_dprt_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_port;
tag_config.s.ip6_nxth_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv6_next_header;
tag_config.s.ip4_src_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_ip;
tag_config.s.ip4_dst_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_ip;
tag_config.s.ip4_sprt_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_port;
tag_config.s.ip4_dprt_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_port;
tag_config.s.ip4_pctl_flag =
cvmx_ipd_cfg.port_config.tag_fields.ipv4_protocol;
tag_config.s.inc_prt_flag =
cvmx_ipd_cfg.port_config.tag_fields.input_port;
tag_config.s.tcp6_tag_type =
(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
tag_config.s.tcp4_tag_type =
(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
tag_config.s.ip6_tag_type =
(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
tag_config.s.ip4_tag_type =
(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
tag_config.s.non_tag_type =
(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
/* Put all packets in group 0. Other groups can be used by the app */
tag_config.s.grp = 0;
cvmx_pip_config_port(ipd_port, port_config, tag_config);
/* Give the user a chance to override our setting for each port */
if (cvmx_override_ipd_port_setup)
cvmx_override_ipd_port_setup(ipd_port);
return 0;
}
/**
* @INTERNAL
* Setup the IPD/PIP for the ports on an interface. Packet
* classification and tagging are set for every port on the
* interface. The number of ports on the interface must already
* have been probed.
*
* @param xiface to setup IPD/PIP for
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_ipd_setup_interface(int xiface)
{
cvmx_helper_interface_mode_t mode;
int num_ports = cvmx_helper_ports_on_interface(xiface);
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
int delta;
if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE)
return 0;
delta = 1;
if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
if (xi.interface < CVMX_HELPER_MAX_GMX)
delta = 16;
}
while (num_ports--) {
if (!cvmx_helper_is_port_valid(xiface, num_ports))
continue;
if (octeon_has_feature(OCTEON_FEATURE_PKI))
__cvmx_helper_pki_port_setup(xi.node, ipd_port);
else
__cvmx_helper_ipd_port_setup(ipd_port);
ipd_port += delta;
}
/* FCS settings */
cvmx_helper_fcs_op(xiface, cvmx_helper_ports_on_interface(xiface),
__cvmx_helper_get_has_fcs(xiface));
mode = cvmx_helper_interface_get_mode(xiface);
if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
__cvmx_helper_loop_enable(xiface);
return 0;
}

View file

@ -0,0 +1,178 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for LOOP initialization, configuration,
* and monitoring.
*/
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/octeon_fdt.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-gpio.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-lbk-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
int __cvmx_helper_loop_enumerate(int xiface)
{
return OCTEON_IS_MODEL(OCTEON_CN68XX) ?
8 : (OCTEON_IS_MODEL(OCTEON_CNF71XX) ? 2 : 4);
}
/**
* @INTERNAL
* Probe a LOOP interface and determine the number of ports
* connected to it. The LOOP interface should still be down
* after this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_loop_probe(int xiface)
{
return __cvmx_helper_loop_enumerate(xiface);
}
/**
* @INTERNAL
 * Bring up and enable a LOOP interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
 * @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_loop_enable(int xiface)
{
cvmx_pip_prt_cfgx_t port_cfg;
int num_ports, index;
unsigned long offset;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
num_ports = __cvmx_helper_get_num_ipd_ports(xiface);
/*
 * We need to disable length checking so packets < 64 bytes and jumbo
* frames don't get errors
*/
for (index = 0; index < num_ports; index++) {
offset = ((octeon_has_feature(OCTEON_FEATURE_PKND)) ?
cvmx_helper_get_pknd(xiface, index) :
cvmx_helper_get_ipd_port(xiface, index));
if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
cvmx_pki_endis_l2_errs(xi.node, offset, 1, 0, 0);
cvmx_pki_endis_fcs_check(xi.node, offset, 0, 0);
} else {
port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(offset));
port_cfg.s.maxerr_en = 0;
port_cfg.s.minerr_en = 0;
csr_wr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64);
}
}
/*
* Disable FCS stripping for loopback ports
*/
if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
ipd_sub_port_fcs.u64 = csr_rd(CVMX_IPD_SUB_PORT_FCS);
ipd_sub_port_fcs.s.port_bit2 = 0;
csr_wr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
}
/*
* Set PKND and BPID for loopback ports.
*/
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
cvmx_pko_reg_loopback_pkind_t lp_pknd;
cvmx_pko_reg_loopback_bpid_t lp_bpid;
for (index = 0; index < num_ports; index++) {
int pknd = cvmx_helper_get_pknd(xiface, index);
int bpid = cvmx_helper_get_bpid(xiface, index);
lp_pknd.u64 = csr_rd(CVMX_PKO_REG_LOOPBACK_PKIND);
lp_bpid.u64 = csr_rd(CVMX_PKO_REG_LOOPBACK_BPID);
if (index == 0)
lp_pknd.s.num_ports = num_ports;
switch (index) {
case 0:
lp_pknd.s.pkind0 = pknd;
lp_bpid.s.bpid0 = bpid;
break;
case 1:
lp_pknd.s.pkind1 = pknd;
lp_bpid.s.bpid1 = bpid;
break;
case 2:
lp_pknd.s.pkind2 = pknd;
lp_bpid.s.bpid2 = bpid;
break;
case 3:
lp_pknd.s.pkind3 = pknd;
lp_bpid.s.bpid3 = bpid;
break;
case 4:
lp_pknd.s.pkind4 = pknd;
lp_bpid.s.bpid4 = bpid;
break;
case 5:
lp_pknd.s.pkind5 = pknd;
lp_bpid.s.bpid5 = bpid;
break;
case 6:
lp_pknd.s.pkind6 = pknd;
lp_bpid.s.bpid6 = bpid;
break;
case 7:
lp_pknd.s.pkind7 = pknd;
lp_bpid.s.bpid7 = bpid;
break;
}
csr_wr(CVMX_PKO_REG_LOOPBACK_PKIND, lp_pknd.u64);
csr_wr(CVMX_PKO_REG_LOOPBACK_BPID, lp_bpid.u64);
}
} else if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
cvmx_lbk_chx_pkind_t lbk_pkind;
for (index = 0; index < num_ports; index++) {
lbk_pkind.u64 = 0;
lbk_pkind.s.pkind = cvmx_helper_get_pknd(xiface, index);
csr_wr_node(xi.node, CVMX_LBK_CHX_PKIND(index),
lbk_pkind.u64);
}
}
return 0;
}

View file

@ -0,0 +1,137 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for NPI initialization, configuration,
* and monitoring.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-sli-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
static int cvmx_npi_num_pipes = -1;
/**
* @INTERNAL
* Probe a NPI interface and determine the number of ports
* connected to it. The NPI interface should still be down
* after this call.
*
* @param interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_npi_probe(int interface)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return 32;
else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
return 128;
else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
return 64;
return 0;
}
/**
* @INTERNAL
 * Bring up and enable an NPI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_npi_enable(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
int port;
int num_ports = cvmx_helper_ports_on_interface(interface);
/*
* On CN50XX, CN52XX, and CN56XX we need to disable length
* checking so packets < 64 bytes and jumbo frames don't get
* errors.
*/
for (port = 0; port < num_ports; port++) {
union cvmx_pip_prt_cfgx port_cfg;
int ipd_port =
(octeon_has_feature(OCTEON_FEATURE_PKND)) ?
cvmx_helper_get_pknd(interface, port) :
cvmx_helper_get_ipd_port(interface, port);
if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
unsigned int node = cvmx_get_node_num();
cvmx_pki_endis_l2_errs(node, ipd_port, 0, 0, 0);
} else {
port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
port_cfg.s.lenerr_en = 0;
port_cfg.s.maxerr_en = 0;
port_cfg.s.minerr_en = 0;
csr_wr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
}
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
/* Set up pknd and bpid */
union cvmx_sli_portx_pkind config;
config.u64 = csr_rd(CVMX_PEXP_SLI_PORTX_PKIND(port));
config.s.bpkind = cvmx_helper_get_bpid(interface, port);
config.s.pkind = cvmx_helper_get_pknd(interface, port);
csr_wr(CVMX_PEXP_SLI_PORTX_PKIND(port), config.u64);
}
}
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
/*
* Set up pko pipes.
*/
union cvmx_sli_tx_pipe config;
config.u64 = csr_rd(CVMX_PEXP_SLI_TX_PIPE);
config.s.base = __cvmx_pko_get_pipe(interface, 0);
config.s.nump =
cvmx_npi_num_pipes < 0 ? num_ports : cvmx_npi_num_pipes;
csr_wr(CVMX_PEXP_SLI_TX_PIPE, config.u64);
}
/* Enables are controlled by the remote host, so nothing to do here */
return 0;
}

View file

@ -0,0 +1,549 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* PKI helper functions.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-sli-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pip.h>
static int pki_helper_debug;
bool cvmx_pki_dflt_init[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] = 1 };
static bool cvmx_pki_dflt_bp_en[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] =
true };
static struct cvmx_pki_cluster_grp_config pki_dflt_clgrp[CVMX_MAX_NODES] = {
{ 0, 0xf },
{ 0, 0xf }
};
struct cvmx_pki_pool_config pki_dflt_pool[CVMX_MAX_NODES] = {
[0 ... CVMX_MAX_NODES -
1] = { .pool_num = -1, .buffer_size = 2048, .buffer_count = 0 }
};
struct cvmx_pki_aura_config pki_dflt_aura[CVMX_MAX_NODES] = {
[0 ... CVMX_MAX_NODES -
1] = { .aura_num = 0, .pool_num = -1, .buffer_count = 0 }
};
struct cvmx_pki_style_config pki_dflt_style[CVMX_MAX_NODES] = {
[0 ... CVMX_MAX_NODES - 1] = { .parm_cfg = { .lenerr_en = 1,
.maxerr_en = 1,
.minerr_en = 1,
.fcs_strip = 1,
.fcs_chk = 1,
.first_skip = 40,
.mbuff_size = 2048 } }
};
struct cvmx_pki_sso_grp_config pki_dflt_sso_grp[CVMX_MAX_NODES];
struct cvmx_pki_qpg_config pki_dflt_qpg[CVMX_MAX_NODES];
struct cvmx_pki_pkind_config pki_dflt_pkind[CVMX_MAX_NODES];
u64 pkind_style_map[CVMX_MAX_NODES][CVMX_PKI_NUM_PKIND] = {
[0 ... CVMX_MAX_NODES -
1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }
};
/* To store the qos watcher values before they are written to pcam when watcher
* is enabled. There is no cvmx-pip.c file, so it ended up here.
*/
struct cvmx_pki_legacy_qos_watcher qos_watcher[8];
/** @INTERNAL
* This function sets up the default ltype map
* @param node node number
*/
void __cvmx_helper_pki_set_dflt_ltype_map(int node)
{
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NONE,
CVMX_PKI_BELTYPE_NONE);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ENET,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_VLAN,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SNAP_PAYLD,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ARP,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_RARP,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4,
CVMX_PKI_BELTYPE_IP4);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4_OPT,
CVMX_PKI_BELTYPE_IP4);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6,
CVMX_PKI_BELTYPE_IP6);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6_OPT,
CVMX_PKI_BELTYPE_IP6);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPSEC_ESP,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPFRAG,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPCOMP,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_TCP,
CVMX_PKI_BELTYPE_TCP);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP,
CVMX_PKI_BELTYPE_UDP);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SCTP,
CVMX_PKI_BELTYPE_SCTP);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP_VXLAN,
CVMX_PKI_BELTYPE_UDP);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GRE,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NVGRE,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GTP,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW28,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW29,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW30,
CVMX_PKI_BELTYPE_MISC);
cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW31,
CVMX_PKI_BELTYPE_MISC);
}
/** @INTERNAL
* This function installs the default VLAN entries to identify
* the VLAN and set WQE[vv], WQE[vs] if a VLAN is found. On 78XX the
* hardware (PKI) is not hardwired to recognize any 802.1Q VLAN
* Ethertypes.
*
* @param node node number
*/
int __cvmx_helper_pki_install_dflt_vlan(int node)
{
struct cvmx_pki_pcam_input pcam_input;
struct cvmx_pki_pcam_action pcam_action;
enum cvmx_pki_term field;
int index;
int bank;
u64 cl_mask = CVMX_PKI_CLUSTER_ALL;
memset(&pcam_input, 0, sizeof(pcam_input));
memset(&pcam_action, 0, sizeof(pcam_action));
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
/* PKI-20858 */
int i;
for (i = 0; i < 4; i++) {
union cvmx_pki_clx_ecc_ctl ecc_ctl;
ecc_ctl.u64 =
csr_rd_node(node, CVMX_PKI_CLX_ECC_CTL(i));
ecc_ctl.s.pcam_en = 0;
ecc_ctl.s.pcam0_cdis = 1;
ecc_ctl.s.pcam1_cdis = 1;
csr_wr_node(node, CVMX_PKI_CLX_ECC_CTL(i), ecc_ctl.u64);
}
}
for (field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
field < CVMX_PKI_PCAM_TERM_ETHTYPE2; field++) {
bank = field & 0x01;
index = cvmx_pki_pcam_entry_alloc(
node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
if (index < 0) {
debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
node, bank);
return -1;
}
pcam_input.style = 0;
pcam_input.style_mask = 0;
pcam_input.field = field;
pcam_input.field_mask = 0xfd;
pcam_input.data = 0x81000000;
pcam_input.data_mask = 0xffff0000;
pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_VLAN;
pcam_action.style_add = 0;
pcam_action.pointer_advance = 4;
cvmx_pki_pcam_write_entry(
node, index, cl_mask, pcam_input,
pcam_action); /*cluster_mask in pass2*/
index = cvmx_pki_pcam_entry_alloc(
node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
if (index < 0) {
debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
node, bank);
return -1;
}
pcam_input.data = 0x88a80000;
cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
pcam_action);
index = cvmx_pki_pcam_entry_alloc(
node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
if (index < 0) {
debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
node, bank);
return -1;
}
pcam_input.data = 0x92000000;
cvmx_pki_pcam_write_entry(
node, index, cl_mask, pcam_input,
pcam_action); /* cluster_mask in pass2*/
index = cvmx_pki_pcam_entry_alloc(
node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
if (index < 0) {
debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
node, bank);
return -1;
}
pcam_input.data = 0x91000000;
cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
pcam_action);
}
return 0;
}
static int __cvmx_helper_setup_pki_cluster_groups(int node)
{
u64 cl_mask;
int cl_group;
cl_group =
cvmx_pki_cluster_grp_alloc(node, pki_dflt_clgrp[node].grp_num);
if (cl_group == CVMX_RESOURCE_ALLOC_FAILED)
return -1;
else if (cl_group == CVMX_RESOURCE_ALREADY_RESERVED) {
if (pki_dflt_clgrp[node].grp_num == -1)
return -1;
else
return 0; /* cluster already configured, share it */
}
cl_mask = pki_dflt_clgrp[node].cluster_mask;
if (pki_helper_debug)
debug("pki-helper: setup pki cluster grp %d with cl_mask 0x%llx\n",
(int)cl_group, (unsigned long long)cl_mask);
cvmx_pki_attach_cluster_to_group(node, cl_group, cl_mask);
return 0;
}
/**
* This function sets up pools/auras to be used by PKI
* @param node node number
*/
static int __cvmx_helper_pki_setup_fpa_pools(int node)
{
u64 buffer_count;
u64 buffer_size;
if (__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura))
return 0; /* aura already configured, share it */
buffer_count = pki_dflt_pool[node].buffer_count;
buffer_size = pki_dflt_pool[node].buffer_size;
if (buffer_count != 0) {
pki_dflt_pool[node].pool = cvmx_fpa3_setup_fill_pool(
node, pki_dflt_pool[node].pool_num, "PKI POOL DFLT",
buffer_size, buffer_count, NULL);
if (!__cvmx_fpa3_pool_valid(pki_dflt_pool[node].pool)) {
cvmx_printf("ERROR: %s: Failed to allocate pool %d\n",
__func__, pki_dflt_pool[node].pool_num);
return -1;
}
pki_dflt_pool[node].pool_num = pki_dflt_pool[node].pool.lpool;
if (pki_helper_debug)
debug("%s pool %d with buffer size %d cnt %d\n",
__func__, pki_dflt_pool[node].pool_num,
(int)buffer_size, (int)buffer_count);
pki_dflt_aura[node].pool_num = pki_dflt_pool[node].pool_num;
pki_dflt_aura[node].pool = pki_dflt_pool[node].pool;
}
buffer_count = pki_dflt_aura[node].buffer_count;
if (buffer_count != 0) {
pki_dflt_aura[node].aura = cvmx_fpa3_set_aura_for_pool(
pki_dflt_aura[node].pool, pki_dflt_aura[node].aura_num,
"PKI DFLT AURA", buffer_size, buffer_count);
if (!__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura)) {
debug("ERROR: %sL Failed to allocate aura %d\n",
__func__, pki_dflt_aura[node].aura_num);
return -1;
}
}
return 0;
}
static int __cvmx_helper_setup_pki_qpg_table(int node)
{
int offset;
offset = cvmx_pki_qpg_entry_alloc(node, pki_dflt_qpg[node].qpg_base, 1);
if (offset == CVMX_RESOURCE_ALLOC_FAILED)
return -1;
else if (offset == CVMX_RESOURCE_ALREADY_RESERVED)
return 0; /* share the qpg table entry */
if (pki_helper_debug)
debug("pki-helper: set qpg entry at offset %d with port add %d aura %d grp_ok %d grp_bad %d\n",
offset, pki_dflt_qpg[node].port_add,
pki_dflt_qpg[node].aura_num, pki_dflt_qpg[node].grp_ok,
pki_dflt_qpg[node].grp_bad);
cvmx_pki_write_qpg_entry(node, offset, &pki_dflt_qpg[node]);
return 0;
}
int __cvmx_helper_pki_port_setup(int node, int ipd_port)
{
int xiface, index;
int pknd, style_num;
int rs;
struct cvmx_pki_pkind_config pkind_cfg;
if (!cvmx_pki_dflt_init[node])
return 0;
xiface = cvmx_helper_get_interface_num(ipd_port);
index = cvmx_helper_get_interface_index_num(ipd_port);
pknd = cvmx_helper_get_pknd(xiface, index);
style_num = pkind_style_map[node][pknd];
/* Try to reserve the style; if it is not configured already, reserve
* and configure it.
*/
rs = cvmx_pki_style_alloc(node, style_num);
if (rs < 0) {
if (rs == CVMX_RESOURCE_ALLOC_FAILED)
return -1;
} else {
if (pki_helper_debug)
debug("pki-helper: set style %d with default parameters\n",
style_num);
pkind_style_map[node][pknd] = style_num;
/* configure style with default parameters */
cvmx_pki_write_style_config(node, style_num,
CVMX_PKI_CLUSTER_ALL,
&pki_dflt_style[node]);
}
if (pki_helper_debug)
debug("pki-helper: set pkind %d with initial style %d\n", pknd,
style_num);
/* write pkind configuration */
pkind_cfg = pki_dflt_pkind[node];
pkind_cfg.initial_style = style_num;
cvmx_pki_write_pkind_config(node, pknd, &pkind_cfg);
return 0;
}
int __cvmx_helper_pki_global_setup(int node)
{
__cvmx_helper_pki_set_dflt_ltype_map(node);
if (!cvmx_pki_dflt_init[node])
return 0;
/* Setup the packet pools*/
__cvmx_helper_pki_setup_fpa_pools(node);
/*set up default cluster*/
__cvmx_helper_setup_pki_cluster_groups(node);
//__cvmx_helper_pki_setup_sso_groups(node);
__cvmx_helper_setup_pki_qpg_table(node);
/*
* errata PKI-19103 backward compat has only 1 aura
* no head line blocking
*/
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
cvmx_pki_buf_ctl_t buf_ctl;
buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
buf_ctl.s.fpa_wait = 1;
csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
}
return 0;
}
/**
 * This function enables the PKI hardware to
* start accepting/processing packets.
*
* @param node node number
*/
void cvmx_helper_pki_enable(int node)
{
if (pki_helper_debug)
debug("enable PKI on node %d\n", node);
__cvmx_helper_pki_install_dflt_vlan(node);
cvmx_pki_setup_clusters(node);
if (cvmx_pki_dflt_bp_en[node])
cvmx_pki_enable_backpressure(node);
cvmx_pki_parse_enable(node, 0);
cvmx_pki_enable(node);
}
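/*
 * Illustrative bring-up sketch (an assumption, not taken from these
 * sources): one plausible ordering on node 0 is global setup, per-port
 * setup, then enabling the PKI block. ipd_port below stands for each
 * active port.
 *
 *   __cvmx_helper_pki_global_setup(0);
 *   __cvmx_helper_pki_port_setup(0, ipd_port);
 *   cvmx_helper_pki_enable(0);
 */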
/**
 * This function sets up the QoS table by allocating a QPG entry and writing
* the provided parameters to that entry (offset).
* @param node node number.
* @param qpg_cfg pointer to struct containing qpg configuration
*/
int cvmx_helper_pki_set_qpg_entry(int node, struct cvmx_pki_qpg_config *qpg_cfg)
{
int offset;
offset = cvmx_pki_qpg_entry_alloc(node, qpg_cfg->qpg_base, 1);
if (pki_helper_debug)
debug("pki-helper:set qpg entry at offset %d\n", offset);
if (offset == CVMX_RESOURCE_ALREADY_RESERVED) {
debug("INFO:setup_qpg_table: offset %d already reserved\n",
qpg_cfg->qpg_base);
return CVMX_RESOURCE_ALREADY_RESERVED;
} else if (offset == CVMX_RESOURCE_ALLOC_FAILED) {
debug("ERROR:setup_qpg_table: no more entries available\n");
return CVMX_RESOURCE_ALLOC_FAILED;
}
qpg_cfg->qpg_base = offset;
cvmx_pki_write_qpg_entry(node, offset, qpg_cfg);
return offset;
}
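/*
 * Illustrative usage sketch (an assumption, not taken from these sources):
 * fill a cvmx_pki_qpg_config and let the helper allocate and program the
 * entry. The field values below are made-up examples.
 *
 *   struct cvmx_pki_qpg_config qpg;
 *   int offset;
 *
 *   memset(&qpg, 0, sizeof(qpg));
 *   qpg.qpg_base = 0;
 *   qpg.port_add = 0;
 *   qpg.aura_num = 0;
 *   qpg.grp_ok = 0;
 *   qpg.grp_bad = 0;
 *   offset = cvmx_helper_pki_set_qpg_entry(node, &qpg);
 *   if (offset == CVMX_RESOURCE_ALLOC_FAILED)
 *       return -1;
 */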
/**
* This function gets all the PKI parameters related to that
* particular port from hardware.
 * @param xipd_port ipd port number (with node) to get the parameters of
* @param port_cfg pointer to structure where to store read parameters
*/
void cvmx_pki_get_port_config(int xipd_port,
struct cvmx_pki_port_config *port_cfg)
{
int xiface, index, pknd;
int style, cl_mask;
cvmx_pki_icgx_cfg_t pki_cl_msk;
struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
/* get the pkind used by this ipd port */
xiface = cvmx_helper_get_interface_num(xipd_port);
index = cvmx_helper_get_interface_index_num(xipd_port);
pknd = cvmx_helper_get_pknd(xiface, index);
cvmx_pki_read_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg);
style = port_cfg->pkind_cfg.initial_style;
pki_cl_msk.u64 = csr_rd_node(
xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
cl_mask = pki_cl_msk.s.clusters;
cvmx_pki_read_style_config(xp.node, style, cl_mask,
&port_cfg->style_cfg);
}
/**
* This function sets all the PKI parameters related to that
* particular port in hardware.
 * @param xipd_port ipd port number (with node) to set the parameters of
* @param port_cfg pointer to structure containing port parameters
*/
void cvmx_pki_set_port_config(int xipd_port,
struct cvmx_pki_port_config *port_cfg)
{
int xiface, index, pknd;
int style, cl_mask;
cvmx_pki_icgx_cfg_t pki_cl_msk;
struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
/* get the pkind used by this ipd port */
xiface = cvmx_helper_get_interface_num(xipd_port);
index = cvmx_helper_get_interface_index_num(xipd_port);
pknd = cvmx_helper_get_pknd(xiface, index);
if (cvmx_pki_write_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg))
return;
style = port_cfg->pkind_cfg.initial_style;
pki_cl_msk.u64 = csr_rd_node(
xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
cl_mask = pki_cl_msk.s.clusters;
cvmx_pki_write_style_config(xp.node, style, cl_mask,
&port_cfg->style_cfg);
}
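/*
 * Illustrative read-modify-write sketch (an assumption, not taken from
 * these sources): fetch the current port configuration, tweak one style
 * parameter, and write it back.
 *
 *   struct cvmx_pki_port_config cfg;
 *
 *   cvmx_pki_get_port_config(xipd_port, &cfg);
 *   cfg.style_cfg.parm_cfg.lenerr_en = 0;
 *   cvmx_pki_set_port_config(xipd_port, &cfg);
 */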
/**
 * This function sets up all the ports of a particular interface
 * for the chosen FCS mode (only used for backward compatibility).
 * New applications can control this via init_interface calls.
* @param node node number.
* @param interface interface number.
* @param nports number of ports
* @param has_fcs 1 -- enable fcs check and fcs strip.
* 0 -- disable fcs check.
*/
void cvmx_helper_pki_set_fcs_op(int node, int interface, int nports,
int has_fcs)
{
int xiface, index;
int pknd;
unsigned int cluster = 0;
cvmx_pki_clx_pkindx_cfg_t pkind_cfg;
xiface = cvmx_helper_node_interface_to_xiface(node, interface);
for (index = 0; index < nports; index++) {
pknd = cvmx_helper_get_pknd(xiface, index);
while (cluster < CVMX_PKI_NUM_CLUSTER) {
/*find the cluster in use pass2*/
pkind_cfg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster));
pkind_cfg.s.fcs_pres = has_fcs;
csr_wr_node(node,
CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster),
pkind_cfg.u64);
cluster++;
}
/* make sure fcs_strip and fcs_check is also enable/disable
* for the style used by that port
*/
cvmx_pki_endis_fcs_check(node, pknd, has_fcs, has_fcs);
cluster = 0;
}
}
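/*
 * Illustrative usage sketch (an assumption, not taken from these sources):
 * enable FCS checking and stripping on every port of interface 0 of node 0.
 *
 *   int nports = cvmx_helper_ports_on_interface(0);
 *
 *   cvmx_helper_pki_set_fcs_op(0, 0, nports, 1);
 */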

View file

@ -0,0 +1,203 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Helper Functions for the PKO
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
static s64 pko_fpa_config_pool = -1;
static u64 pko_fpa_config_size = 1024;
/**
 * cvmx_override_pko_queue_priority(int ipd_port, uint8_t
 * priorities[16]) is a function pointer. It is meant to allow
* customization of the PKO queue priorities based on the port
* number. Users should set this pointer to a function before
* calling any cvmx-helper operations.
*/
void (*cvmx_override_pko_queue_priority)(int ipd_port,
uint8_t *priorities) = NULL;
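/*
 * Illustrative usage sketch (an assumption, not taken from these sources):
 * a board or application could install its own callback before calling any
 * cvmx-helper setup. my_queue_priorities() is a hypothetical name; it gives
 * the first queue strict priority and all remaining queues one round each.
 *
 *   static void my_queue_priorities(int ipd_port, uint8_t *priorities)
 *   {
 *       int i;
 *
 *       priorities[0] = 8;
 *       for (i = 1; i < 16; i++)
 *           priorities[i] = 1;
 *   }
 *
 *   cvmx_override_pko_queue_priority = my_queue_priorities;
 */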
int64_t cvmx_fpa_get_pko_pool(void)
{
return pko_fpa_config_pool;
}
/**
* Gets the buffer size of pko pool
*/
u64 cvmx_fpa_get_pko_pool_block_size(void)
{
return pko_fpa_config_size;
}
/**
* Initialize PKO command queue buffer pool
*/
static int cvmx_helper_pko_pool_init(void)
{
u8 pool;
unsigned int buf_count;
unsigned int pkt_buf_count;
int rc;
/* Reserve pool */
pool = cvmx_fpa_get_pko_pool();
/* Avoid redundant pool creation */
if (cvmx_fpa_get_block_size(pool) > 0) {
#ifdef DEBUG
debug("WARNING: %s: pool %d already initialized\n", __func__,
pool);
#endif
/* It is up to the app to have sufficient buffer count */
return pool;
}
/* Calculate buffer count: one per queue + 3-word-cmds * max_pkts */
pkt_buf_count = cvmx_fpa_get_packet_pool_buffer_count();
buf_count = CVMX_PKO_MAX_OUTPUT_QUEUES + (pkt_buf_count * 3) / 8;
/* Allocate pools for pko command queues */
rc = __cvmx_helper_initialize_fpa_pool(pool,
cvmx_fpa_get_pko_pool_block_size(),
buf_count, "PKO Cmd-bufs");
if (rc < 0)
debug("%s: ERROR: in PKO buffer pool\n", __func__);
pool = rc;
return pool;
}
/**
* Initialize the PKO
*
*/
int cvmx_helper_pko_init(void)
{
int rc;
rc = cvmx_helper_pko_pool_init();
if (rc < 0)
return rc;
__cvmx_helper_init_port_config_data(0);
cvmx_pko_hw_init(cvmx_fpa_get_pko_pool(),
cvmx_fpa_get_pko_pool_block_size());
return 0;
}
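/*
 * Illustrative usage sketch (an assumption, not taken from these sources):
 * initialize the legacy PKO once, then set up the queues of interface 0.
 *
 *   if (cvmx_helper_pko_init() == 0)
 *       __cvmx_helper_interface_setup_pko(0);
 */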
/**
* @INTERNAL
 * Set up the PKO for the ports on an interface. The number of
* queues per port and the priority of each PKO output queue
* is set here. PKO must be disabled when this function is called.
*
* @param interface to setup PKO for
*
* @return Zero on success, negative on failure
*
* @note This is for PKO1/PKO2, and is not used for PKO3.
*/
int __cvmx_helper_interface_setup_pko(int interface)
{
/*
* Each packet output queue has an associated priority. The
* higher the priority, the more often it can send a packet. A
* priority of 8 means it can send in all 8 rounds of
* contention. We're going to make each queue one less than
* the last. The vector of priorities has been extended to
* support CN5xxx CPUs, where up to 16 queues can be
* associated to a port. To keep backward compatibility we
* don't change the initial 8 priorities and replicate them in
* the second half. With per-core PKO queues (PKO lockless
* operation) all queues have the same priority.
*/
/* uint8_t priorities[16] = {8,7,6,5,4,3,2,1,8,7,6,5,4,3,2,1}; */
u8 priorities[16] = { [0 ... 15] = 8 };
/*
* Setup the IPD/PIP and PKO for the ports discovered
* above. Here packet classification, tagging and output
* priorities are set.
*/
int num_ports = cvmx_helper_ports_on_interface(interface);
while (num_ports--) {
int ipd_port;
if (!cvmx_helper_is_port_valid(interface, num_ports))
continue;
ipd_port = cvmx_helper_get_ipd_port(interface, num_ports);
/*
* Give the user a chance to override the per queue
* priorities.
*/
if (cvmx_override_pko_queue_priority)
cvmx_override_pko_queue_priority(ipd_port, priorities);
cvmx_pko_config_port(ipd_port,
cvmx_pko_get_base_queue(ipd_port),
cvmx_pko_get_num_queues(ipd_port),
priorities);
ipd_port++;
}
return 0;
/* NOTE:
* Now this function is called for all chips including 68xx,
* but on the 68xx it does not enable multiple pko_iports per
 * eport, while before it was doing 3 pko_iports per eport,
 * but the reason for that is not clear.
*/
}

File diff suppressed because it is too large

View file

@ -0,0 +1,398 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*/
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-asxx-defs.h>
#include <mach/cvmx-dbg-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-npi-defs.h>
#include <mach/cvmx-pko-defs.h>
/**
* @INTERNAL
* Probe RGMII ports and determine the number present
*
* @param xiface Interface to probe
*
* @return Number of RGMII/GMII/MII ports (0-4).
*/
int __cvmx_helper_rgmii_probe(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int num_ports = 0;
union cvmx_gmxx_inf_mode mode;
mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));
if (mode.s.type)
debug("ERROR: Unsupported Octeon model in %s\n", __func__);
else
debug("ERROR: Unsupported Octeon model in %s\n", __func__);
return num_ports;
}
/**
* @INTERNAL
 * Configure all of the ASX, GMX, and PKO registers required
* to get RGMII to function on the supplied interface.
*
* @param xiface PKO Interface to configure (0 or 1)
*
* @return Zero on success
*/
int __cvmx_helper_rgmii_enable(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
int num_ports = cvmx_helper_ports_on_interface(interface);
int port;
union cvmx_gmxx_inf_mode mode;
union cvmx_asxx_tx_prt_en asx_tx;
union cvmx_asxx_rx_prt_en asx_rx;
mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
if (num_ports == -1)
return -1;
if (mode.s.en == 0)
return -1;
/* Configure the ASX registers needed to use the RGMII ports */
asx_tx.u64 = 0;
asx_tx.s.prt_en = cvmx_build_mask(num_ports);
csr_wr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
asx_rx.u64 = 0;
asx_rx.s.prt_en = cvmx_build_mask(num_ports);
csr_wr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
/* Configure the GMX registers needed to use the RGMII ports */
for (port = 0; port < num_ports; port++) {
/*
* Configure more flexible RGMII preamble
* checking. Pass 1 doesn't support this feature.
*/
union cvmx_gmxx_rxx_frm_ctl frm_ctl;
frm_ctl.u64 = csr_rd(CVMX_GMXX_RXX_FRM_CTL(port, interface));
/* New field, so must be compile time */
frm_ctl.s.pre_free = 1;
csr_wr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
/*
* Each pause frame transmitted will ask for about 10M
* bit times before resume. If buffer space comes
* available before that time has expired, an XON
* pause frame (0 time) will be transmitted to restart
* the flow.
*/
csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface),
19000);
csr_wr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
csr_wr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
}
__cvmx_helper_setup_gmx(interface, num_ports);
/* enable the ports now */
for (port = 0; port < num_ports; port++) {
union cvmx_gmxx_prtx_cfg gmx_cfg;
cvmx_helper_link_autoconf(
cvmx_helper_get_ipd_port(interface, port));
gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(port, interface));
gmx_cfg.s.en = 1;
csr_wr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
}
return 0;
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_asxx_prt_loop asxx_prt_loop;
asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
if (asxx_prt_loop.s.int_loop & (1 << index)) {
/* Force 1Gbps full duplex on internal loopback */
cvmx_helper_link_info_t result;
result.u64 = 0;
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
return result;
} else {
return __cvmx_helper_board_link_get(ipd_port);
}
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_gmii_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
int index = cvmx_helper_get_interface_index_num(ipd_port);
if (index == 0) {
result = __cvmx_helper_rgmii_link_get(ipd_port);
} else {
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
}
return result;
}
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_rgmii_link_set(int ipd_port,
cvmx_helper_link_info_t link_info)
{
int result = 0;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_gmxx_prtx_cfg original_gmx_cfg;
union cvmx_gmxx_prtx_cfg new_gmx_cfg;
union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
int i;
/* Read the current settings so we know the current enable state */
original_gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
new_gmx_cfg = original_gmx_cfg;
/* Disable the lowest level RX */
csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1 << index));
memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
/* Disable all queues so that TX should become idle */
for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
int queue = cvmx_pko_get_base_queue(ipd_port) + i;
csr_wr(CVMX_PKO_REG_READ_IDX, queue);
pko_mem_queue_qos.u64 = csr_rd(CVMX_PKO_MEM_QUEUE_QOS);
pko_mem_queue_qos.s.pid = ipd_port;
pko_mem_queue_qos.s.qid = queue;
pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
pko_mem_queue_qos.s.qos_mask = 0;
csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
}
/* Disable backpressure */
gmx_tx_ovr_bp.u64 = csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
gmx_tx_ovr_bp.s.bp &= ~(1 << index);
gmx_tx_ovr_bp.s.en |= 1 << index;
csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
/*
* Poll the GMX state machine waiting for it to become
* idle. Preferably we should only change speed when it is
* idle. If it doesn't become idle we will still do the speed
* change, but there is a slight chance that GMX will
* lockup.
*/
csr_wr(CVMX_NPI_DBG_SELECT, interface * 0x800 + index * 0x100 + 0x880);
CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 7, ==, 0,
10000);
CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 0xf, ==, 0,
10000);
/* Disable the port before we make any changes */
new_gmx_cfg.s.en = 0;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
/* Set full/half duplex */
if (!link_info.s.link_up)
/* Force full duplex on down links */
new_gmx_cfg.s.duplex = 1;
else
new_gmx_cfg.s.duplex = link_info.s.full_duplex;
/* Set the link speed. Anything unknown is set to 1Gbps */
if (link_info.s.speed == 10) {
new_gmx_cfg.s.slottime = 0;
new_gmx_cfg.s.speed = 0;
} else if (link_info.s.speed == 100) {
new_gmx_cfg.s.slottime = 0;
new_gmx_cfg.s.speed = 0;
} else {
new_gmx_cfg.s.slottime = 1;
new_gmx_cfg.s.speed = 1;
}
/* Adjust the clocks */
if (link_info.s.speed == 10) {
csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 50);
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
} else if (link_info.s.speed == 100) {
csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 5);
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
} else {
csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
}
/* Do a read to make sure all setup stuff is complete */
csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
/* Save the new GMX setting without enabling the port */
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
/* Enable the lowest level RX */
if (link_info.s.link_up)
csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) | (1 << index));
/* Re-enable the TX path */
for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
int queue = cvmx_pko_get_base_queue(ipd_port) + i;
csr_wr(CVMX_PKO_REG_READ_IDX, queue);
csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
}
/* Restore backpressure */
csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
/* Restore the GMX enable state. Port config is complete */
new_gmx_cfg.s.en = original_gmx_cfg.s.en;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
return result;
}
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
int enable_external)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
int original_enable;
union cvmx_gmxx_prtx_cfg gmx_cfg;
union cvmx_asxx_prt_loop asxx_prt_loop;
/* Read the current enable state and save it */
gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
original_enable = gmx_cfg.s.en;
/* Force port to be disabled */
gmx_cfg.s.en = 0;
if (enable_internal) {
/* Force speed if we're doing internal loopback */
gmx_cfg.s.duplex = 1;
gmx_cfg.s.slottime = 1;
gmx_cfg.s.speed = 1;
csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
}
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
/* Set the loopback bits */
asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
if (enable_internal)
asxx_prt_loop.s.int_loop |= 1 << index;
else
asxx_prt_loop.s.int_loop &= ~(1 << index);
if (enable_external)
asxx_prt_loop.s.ext_loop |= 1 << index;
else
asxx_prt_loop.s.ext_loop &= ~(1 << index);
csr_wr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
/* Force enables in internal loopback */
if (enable_internal) {
u64 tmp;
tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
csr_wr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
csr_wr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
original_enable = 1;
}
/* Restore the enable state */
gmx_cfg.s.en = original_enable;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
return 0;
}

File diff suppressed because it is too large

View file

@ -0,0 +1,781 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for SGMII initialization, configuration,
* and monitoring.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
/**
* @INTERNAL
* Perform initialization required only once for an SGMII port.
*
* @param interface to init
 * @param index Index of port on the interface
*
* @return Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
{
const u64 clock_mhz = 1200; /* todo: fixme */
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
if (!cvmx_helper_is_port_valid(interface, index))
return 0;
/* Disable GMX */
gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
gmxx_prtx_cfg.s.en = 0;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
/*
* Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
* appropriate value. 1000BASE-X specifies a 10ms
* interval. SGMII specifies a 1.6ms interval.
*/
pcsx_miscx_ctl_reg.u64 =
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
/* Adjust the MAC mode if requested by device tree */
pcsx_miscx_ctl_reg.s.mac_phy =
cvmx_helper_get_mac_phy_mode(interface, index);
pcsx_miscx_ctl_reg.s.mode =
cvmx_helper_get_1000x_mode(interface, index);
csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
pcsx_linkx_timer_count_reg.u64 =
csr_rd(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
if (pcsx_miscx_ctl_reg.s.mode)
/* 1000BASE-X */
pcsx_linkx_timer_count_reg.s.count =
(10000ull * clock_mhz) >> 10;
else
/* SGMII */
pcsx_linkx_timer_count_reg.s.count =
(1600ull * clock_mhz) >> 10;
csr_wr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
pcsx_linkx_timer_count_reg.u64);
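/*
 * Worked example (assuming the 1200 MHz clock above): for SGMII,
 * (1600 us * 1200 cycles/us) >> 10 = 1875, i.e. the register appears
 * to count in units of 1024 clock cycles, so 1875 * 1024 cycles gives
 * roughly the required 1.6 ms interval.
 */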
/*
* Write the advertisement register to be used as the
* tx_Config_Reg<D15:D0> of the autonegotiation. In
* 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
* In SGMII PHY mode, tx_Config_Reg<D15:D0> is
* PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
* tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
* step can be skipped.
*/
if (pcsx_miscx_ctl_reg.s.mode) {
/* 1000BASE-X */
union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
pcsx_anx_adv_reg.u64 =
csr_rd(CVMX_PCSX_ANX_ADV_REG(index, interface));
pcsx_anx_adv_reg.s.rem_flt = 0;
pcsx_anx_adv_reg.s.pause = 3;
pcsx_anx_adv_reg.s.hfd = 1;
pcsx_anx_adv_reg.s.fd = 1;
csr_wr(CVMX_PCSX_ANX_ADV_REG(index, interface),
pcsx_anx_adv_reg.u64);
} else {
if (pcsx_miscx_ctl_reg.s.mac_phy) {
/* PHY Mode */
union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
pcsx_sgmx_an_adv_reg.u64 = csr_rd(
CVMX_PCSX_SGMX_AN_ADV_REG(index, interface));
pcsx_sgmx_an_adv_reg.s.dup = 1;
pcsx_sgmx_an_adv_reg.s.speed = 2;
csr_wr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface),
pcsx_sgmx_an_adv_reg.u64);
} else {
/* MAC Mode - Nothing to do */
}
}
return 0;
}
static int __cvmx_helper_need_g15618(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) ||
OCTEON_IS_MODEL(OCTEON_CN68XX))
return 1;
else
return 0;
}
/**
* @INTERNAL
 * Initialize the SERDES link for the first time or after a loss
* of link.
*
* @param interface to init
 * @param index Index of port on the interface
*
* @return Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
{
union cvmx_pcsx_mrx_control_reg control_reg;
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
bool phy_mode;
bool an_disable; /** Disable autonegotiation */
bool mode_1000x; /** 1000Base-X mode */
if (!cvmx_helper_is_port_valid(interface, index))
return 0;
/*
* Take PCS through a reset sequence.
* PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
* Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
* value of the other PCS*_MR*_CONTROL_REG bits). Read
* PCS*_MR*_CONTROL_REG[RESET] until it changes value to
* zero.
*/
control_reg.u64 = csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
/*
* Errata G-15618 requires disabling PCS soft reset in CN63XX
 * passes up to 2.1.
*/
if (!__cvmx_helper_need_g15618()) {
control_reg.s.reset = 1;
csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
control_reg.u64);
if (CVMX_WAIT_FOR_FIELD64(
CVMX_PCSX_MRX_CONTROL_REG(index, interface),
cvmx_pcsx_mrx_control_reg_t, reset, ==, 0, 10000)) {
debug("SGMII%x: Timeout waiting for port %d to finish reset\n",
interface, index);
return -1;
}
}
/*
* Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
* sgmii negotiation starts.
*/
phy_mode = cvmx_helper_get_mac_phy_mode(interface, index);
an_disable = (phy_mode ||
!cvmx_helper_get_port_autonegotiation(interface, index));
control_reg.s.an_en = !an_disable;
/* Force a PCS reset by powering down the PCS interface
* This is needed to deal with broken Qualcomm/Atheros PHYs and switches
* which never recover if PCS is not power cycled. The alternative
* is to power cycle or hardware reset the Qualcomm devices whenever
* SGMII is initialized.
*
* This is needed for the QCA8033 PHYs as well as the QCA833X switches
* to work. The QCA8337 switch has additional SGMII problems and is
* best avoided if at all possible. Failure to power cycle PCS prevents
* any traffic from flowing between Octeon and Qualcomm devices if there
* is a warm reset. Even a software reset to the Qualcomm device will
* not work.
*
* Note that this problem has been reported between Qualcomm and other
* vendor's processors as well so this problem is not unique to
* Qualcomm and Octeon.
*
* Power cycling PCS doesn't hurt anything with non-Qualcomm devices
* other than adding a 25ms delay during initialization.
*/
control_reg.s.pwr_dn = 1;
csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
/* 25ms should be enough, 10ms is too short */
mdelay(25);
control_reg.s.pwr_dn = 0;
csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
/* The Cortina PHY runs in 1000base-X mode */
mode_1000x = cvmx_helper_get_1000x_mode(interface, index);
pcsx_miscx_ctl_reg.u64 =
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
pcsx_miscx_ctl_reg.s.mode = mode_1000x;
pcsx_miscx_ctl_reg.s.mac_phy = phy_mode;
csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
if (an_disable)
/* In PHY mode we can't query the link status so we just
* assume that the link is up.
*/
return 0;
/*
* Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
* that sgmii autonegotiation is complete. In MAC mode this
* isn't an ethernet link, but a link between Octeon and the
* PHY.
*/
if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
10000)) {
debug("SGMII%x: Port %d link timeout\n", interface, index);
return -1;
}
return 0;
}
/**
* @INTERNAL
 * Configure an SGMII link to the specified speed after the SERDES
* link is up.
*
* @param interface to init
 * @param index Index of port on the interface
* @param link_info Link state to configure
*
* @return Zero on success, negative on failure
*/
static int
__cvmx_helper_sgmii_hardware_init_link_speed(int interface, int index,
cvmx_helper_link_info_t link_info)
{
int is_enabled;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
if (!cvmx_helper_is_port_valid(interface, index))
return 0;
/* Disable GMX before we make any changes. Remember the enable state */
gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
is_enabled = gmxx_prtx_cfg.s.en;
gmxx_prtx_cfg.s.en = 0;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
/* Wait for GMX to be idle */
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1,
10000) ||
CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1,
10000)) {
debug("SGMII%d: Timeout waiting for port %d to be idle\n",
interface, index);
return -1;
}
/* Read GMX CFG again to make sure the disable completed */
gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
/*
* Get the misc control for PCS. We will need to set the
* duplication amount.
*/
pcsx_miscx_ctl_reg.u64 =
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
/*
* Use GMXENO to force the link down if the status we get says
* it should be down.
*/
pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
/* Only change the duplex setting if the link is up */
if (link_info.s.link_up)
gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
/* Do speed based setting for GMX */
switch (link_info.s.speed) {
case 10:
gmxx_prtx_cfg.s.speed = 0;
gmxx_prtx_cfg.s.speed_msb = 1;
gmxx_prtx_cfg.s.slottime = 0;
/* Setting from GMX-603 */
pcsx_miscx_ctl_reg.s.samp_pt = 25;
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
break;
case 100:
gmxx_prtx_cfg.s.speed = 0;
gmxx_prtx_cfg.s.speed_msb = 0;
gmxx_prtx_cfg.s.slottime = 0;
pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
break;
case 1000:
gmxx_prtx_cfg.s.speed = 1;
gmxx_prtx_cfg.s.speed_msb = 0;
gmxx_prtx_cfg.s.slottime = 1;
pcsx_miscx_ctl_reg.s.samp_pt = 1;
csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
if (gmxx_prtx_cfg.s.duplex)
/* full duplex */
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
else
/* half duplex */
csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
break;
default:
break;
}
/* Write the new misc control for PCS */
csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
/* Write the new GMX settings with the port still disabled */
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
/* Read GMX CFG again to make sure the config completed */
gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
/* Restore the enabled / disabled state */
gmxx_prtx_cfg.s.en = is_enabled;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
return 0;
}
/**
* @INTERNAL
* Bring up the SGMII interface to be ready for packet I/O but
* leave I/O disabled using the GMX override. This function
* follows the bringup documented in 10.6.3 of the manual.
*
 * @param interface to bring up
* @param num_ports Number of ports on the interface
*
* @return Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
{
int index;
int do_link_set = 1;
/*
* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis
* be programmed.
*/
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
union cvmx_ciu_qlm2 ciu_qlm;
ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 0xf;
ciu_qlm.s.txmargin = 0xd;
csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
}
/*
* CN63XX Pass 2.x errata G-15273 requires the QLM De-emphasis
* be programmed when using a 156.25Mhz ref clock.
*/
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
/* Read the QLM speed pins */
union cvmx_mio_rst_boot mio_rst_boot;
mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);
if (mio_rst_boot.cn63xx.qlm2_spd == 4) {
union cvmx_ciu_qlm2 ciu_qlm;
ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 0x0;
ciu_qlm.s.txmargin = 0xf;
csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
}
}
__cvmx_helper_setup_gmx(interface, num_ports);
for (index = 0; index < num_ports; index++) {
int ipd_port = cvmx_helper_get_ipd_port(interface, index);
if (!cvmx_helper_is_port_valid(interface, index))
continue;
__cvmx_helper_sgmii_hardware_init_one_time(interface, index);
if (do_link_set)
__cvmx_helper_sgmii_link_set(ipd_port,
__cvmx_helper_sgmii_link_get(ipd_port));
}
return 0;
}
int __cvmx_helper_sgmii_enumerate(int xiface)
{
if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
return 2;
if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
struct cvmx_xiface xi =
cvmx_helper_xiface_to_node_interface(xiface);
enum cvmx_qlm_mode qlm_mode =
cvmx_qlm_get_dlm_mode(0, xi.interface);
if (qlm_mode == CVMX_QLM_MODE_SGMII)
return 1;
else if (qlm_mode == CVMX_QLM_MODE_QSGMII)
return 4;
return 0;
}
return 4;
}
/**
* @INTERNAL
* Probe a SGMII interface and determine the number of ports
* connected to it. The SGMII interface should still be down after
* this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_sgmii_probe(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
union cvmx_gmxx_inf_mode mode;
int ports;
/*
* Check if QLM is configured correct for SGMII, verify the
* speed as well as mode.
*/
if (OCTEON_IS_OCTEON2()) {
int qlm = cvmx_qlm_interface(xiface);
if (cvmx_qlm_get_mode(qlm) != CVMX_QLM_MODE_SGMII)
return 0;
}
/* Do not enable the interface if is not in SGMII mode */
ports = __cvmx_helper_sgmii_enumerate(xiface);
if (ports <= 0)
return 0;
/*
* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
* interface needs to be enabled before IPD otherwise per port
* backpressure may not work properly.
*/
mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
mode.s.en = 1;
csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);
return ports;
}
/**
* @INTERNAL
 * Bring up and enable an SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_enable(int xiface)
{
int num_ports = cvmx_helper_ports_on_interface(xiface);
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
int index;
/* Setup PKND and BPID */
if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
for (index = 0; index < num_ports; index++) {
union cvmx_gmxx_bpid_msk bpid_msk;
union cvmx_gmxx_bpid_mapx bpid_map;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
if (!cvmx_helper_is_port_valid(interface, index))
continue;
/* Setup PKIND */
gmxx_prtx_cfg.u64 =
csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
gmxx_prtx_cfg.s.pknd =
cvmx_helper_get_pknd(interface, index);
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface),
gmxx_prtx_cfg.u64);
/* Setup BPID */
bpid_map.u64 =
csr_rd(CVMX_GMXX_BPID_MAPX(index, interface));
bpid_map.s.val = 1;
bpid_map.s.bpid =
cvmx_helper_get_bpid(interface, index);
csr_wr(CVMX_GMXX_BPID_MAPX(index, interface),
bpid_map.u64);
bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
bpid_msk.s.msk_or |= (1 << index);
bpid_msk.s.msk_and &= ~(1 << index);
csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
}
}
__cvmx_helper_sgmii_hardware_init(interface, num_ports);
/* CN68XX adds the padding and FCS in PKO, not GMX */
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
union cvmx_gmxx_txx_append gmxx_txx_append_cfg;
for (index = 0; index < num_ports; index++) {
if (!cvmx_helper_is_port_valid(interface, index))
continue;
gmxx_txx_append_cfg.u64 =
csr_rd(CVMX_GMXX_TXX_APPEND(index, interface));
gmxx_txx_append_cfg.s.fcs = 0;
gmxx_txx_append_cfg.s.pad = 0;
csr_wr(CVMX_GMXX_TXX_APPEND(index, interface),
gmxx_txx_append_cfg.u64);
}
}
/* Enable running disparity check for QSGMII interface */
if (OCTEON_IS_MODEL(OCTEON_CN70XX) && num_ports > 1) {
union cvmx_gmxx_qsgmii_ctl qsgmii_ctl;
qsgmii_ctl.u64 = 0;
qsgmii_ctl.s.disparity = 1;
csr_wr(CVMX_GMXX_QSGMII_CTL(interface), qsgmii_ctl.u64);
}
for (index = 0; index < num_ports; index++) {
union cvmx_gmxx_txx_append append_cfg;
union cvmx_gmxx_txx_sgmii_ctl sgmii_ctl;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
if (!cvmx_helper_is_port_valid(interface, index))
continue;
/*
* Clear the align bit if preamble is set to attain
* maximum tx rate.
*/
append_cfg.u64 = csr_rd(CVMX_GMXX_TXX_APPEND(index, interface));
sgmii_ctl.u64 =
csr_rd(CVMX_GMXX_TXX_SGMII_CTL(index, interface));
sgmii_ctl.s.align = append_cfg.s.preamble ? 0 : 1;
csr_wr(CVMX_GMXX_TXX_SGMII_CTL(index, interface),
sgmii_ctl.u64);
gmxx_prtx_cfg.u64 =
csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
gmxx_prtx_cfg.s.en = 1;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
}
return 0;
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
int speed = 1000;
int qlm;
result.u64 = 0;
if (!cvmx_helper_is_port_valid(interface, index))
return result;
if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
union cvmx_gmxx_inf_mode inf_mode;
inf_mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
if (inf_mode.s.rate & (1 << index))
speed = 2500;
else
speed = 1000;
} else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
qlm = cvmx_qlm_interface(interface);
speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
speed = cvmx_qlm_get_gbaud_mhz(0) * 8 / 10;
} else if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
speed = cvmx_qlm_get_gbaud_mhz(0) * 8 / 10;
if (cvmx_qlm_get_dlm_mode(0, interface) == CVMX_QLM_MODE_SGMII)
speed >>= 1;
else
speed >>= 2;
}
pcsx_mrx_control_reg.u64 =
csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
if (pcsx_mrx_control_reg.s.loopbck1) {
/* Force 1Gbps full duplex link for internal loopback */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = speed;
return result;
}
pcsx_miscx_ctl_reg.u64 =
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
if (pcsx_miscx_ctl_reg.s.mac_phy ||
cvmx_helper_get_port_force_link_up(interface, index)) {
/* PHY Mode */
/* Note that this also works for 1000base-X mode */
result.s.speed = speed;
result.s.full_duplex = 1;
result.s.link_up = 1;
return result;
}
/* MAC Mode */
return __cvmx_helper_board_link_get(ipd_port);
}
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_link_set(int ipd_port,
cvmx_helper_link_info_t link_info)
{
union cvmx_pcsx_mrx_control_reg control_reg;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
if (!cvmx_helper_is_port_valid(interface, index))
return 0;
/* For some devices, i.e. the Qualcomm QCA8337 switch we need to power
* down the PCS interface when the link goes down and power it back
* up when the link returns.
*/
if (link_info.s.link_up || !__cvmx_helper_need_g15618()) {
__cvmx_helper_sgmii_hardware_init_link(interface, index);
} else {
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
pcsx_miscx_ctl_reg.u64 =
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
/* Disable autonegotiation when MAC mode is enabled or
* autonegotiation is disabled.
*/
control_reg.u64 =
csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
if (pcsx_miscx_ctl_reg.s.mac_phy == 0 ||
!cvmx_helper_get_port_autonegotiation(interface, index)) {
control_reg.s.an_en = 0;
control_reg.s.spdmsb = 1;
control_reg.s.spdlsb = 0;
control_reg.s.dup = 1;
}
csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
control_reg.u64);
csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
/*
* Use GMXENO to force the link down; it will get
* re-enabled later...
*/
pcsx_miscx_ctl_reg.s.gmxeno = 1;
csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
return 0;
}
return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
link_info);
}
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
int enable_external)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
if (!cvmx_helper_is_port_valid(interface, index))
return 0;
pcsx_mrx_control_reg.u64 =
csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
pcsx_mrx_control_reg.u64);
pcsx_miscx_ctl_reg.u64 =
csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
__cvmx_helper_sgmii_hardware_init_link(interface, index);
return 0;
}
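/*
 * Illustrative sketch only, not from the original sources: internal loopback
 * is a convenient MAC/PCS self-test because transmitted frames come straight
 * back into the IPD without touching the wire. The wrappers below are
 * hypothetical; the port number is whatever the caller wants to test.
 */
static int example_sgmii_selftest_start(int ipd_port)
{
	/* enable_internal = 1, enable_external = 0 */
	return __cvmx_helper_sgmii_configure_loopback(ipd_port, 1, 0);
}
static int example_sgmii_selftest_stop(int ipd_port)
{
	/* Restore normal operation: both loopbacks off */
	return __cvmx_helper_sgmii_configure_loopback(ipd_port, 0, 0);
}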


@ -181,127 +181,6 @@ const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mo
return "UNKNOWN";
}
/**
* Debug routine to dump the packet structure to the console
*
* @param work Work queue entry containing the packet to dump
* @return
*/
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
u64 count;
u64 remaining_bytes;
union cvmx_buf_ptr buffer_ptr;
cvmx_buf_ptr_pki_t bptr;
cvmx_wqe_78xx_t *wqe = (void *)work;
u64 start_of_buffer;
u8 *data_address;
u8 *end_of_data;
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
cvmx_pki_dump_wqe(wqe);
cvmx_wqe_pki_errata_20776(work);
} else {
debug("WORD0 = %lx\n", (unsigned long)work->word0.u64);
debug("WORD1 = %lx\n", (unsigned long)work->word1.u64);
debug("WORD2 = %lx\n", (unsigned long)work->word2.u64);
debug("Packet Length: %u\n", cvmx_wqe_get_len(work));
debug(" Input Port: %u\n", cvmx_wqe_get_port(work));
debug(" QoS: %u\n", cvmx_wqe_get_qos(work));
debug(" Buffers: %u\n", cvmx_wqe_get_bufs(work));
}
if (cvmx_wqe_get_bufs(work) == 0) {
int wqe_pool;
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
debug("%s: ERROR: Unexpected bufs==0 in WQE\n", __func__);
return -1;
}
wqe_pool = (int)cvmx_fpa_get_wqe_pool();
buffer_ptr.u64 = 0;
buffer_ptr.s.pool = wqe_pool;
buffer_ptr.s.size = 128;
buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
if (cvmx_likely(!work->word2.s.not_IP)) {
union cvmx_pip_ip_offset pip_ip_offset;
pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);
buffer_ptr.s.addr +=
(pip_ip_offset.s.offset << 3) - work->word2.s.ip_offset;
buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
} else {
/*
			 * WARNING: This code assumes that the packet
* is not RAW. If it was, we would use
* PIP_GBL_CFG[RAW_SHF] instead of
* PIP_GBL_CFG[NIP_SHF].
*/
union cvmx_pip_gbl_cfg pip_gbl_cfg;
pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
}
} else {
buffer_ptr = work->packet_ptr;
}
remaining_bytes = cvmx_wqe_get_len(work);
while (remaining_bytes) {
/* native cn78xx buffer format, unless legacy-translated */
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) && !wqe->pki_wqe_translated) {
bptr.u64 = buffer_ptr.u64;
/* XXX- assumes cache-line aligned buffer */
start_of_buffer = (bptr.addr >> 7) << 7;
debug(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
debug(" Buffer Data: %llx\n", (unsigned long long)bptr.addr);
debug(" Buffer Size: %u\n", bptr.size);
data_address = (uint8_t *)cvmx_phys_to_ptr(bptr.addr);
end_of_data = data_address + bptr.size;
} else {
start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
debug(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
debug(" Buffer I : %u\n", buffer_ptr.s.i);
debug(" Buffer Back: %u\n", buffer_ptr.s.back);
debug(" Buffer Pool: %u\n", buffer_ptr.s.pool);
debug(" Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
debug(" Buffer Size: %u\n", buffer_ptr.s.size);
data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
end_of_data = data_address + buffer_ptr.s.size;
}
debug("\t\t");
count = 0;
while (data_address < end_of_data) {
if (remaining_bytes == 0)
break;
remaining_bytes--;
debug("%02x", (unsigned int)*data_address);
data_address++;
if (remaining_bytes && count == 7) {
debug("\n\t\t");
count = 0;
} else {
count++;
}
}
debug("\n");
if (remaining_bytes) {
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
!wqe->pki_wqe_translated)
buffer_ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
else
buffer_ptr.u64 =
*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
}
}
return 0;
}
/**
* @INTERNAL
*
@ -678,68 +557,6 @@ void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
}
}
void cvmx_helper_setup_legacy_red(int pass_thresh, int drop_thresh)
{
unsigned int node = cvmx_get_node_num();
int aura, bpid;
int buf_cnt;
bool ena_red = 0, ena_drop = 0, ena_bp = 0;
#define FPA_RED_AVG_DLY 1
#define FPA_RED_LVL_DLY 3
#define FPA_QOS_AVRG 0
/* Trying to make it backward compatible with older chips */
/* Setting up avg_dly and prb_dly, enable bits */
if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG,
FPA_RED_LVL_DLY, FPA_RED_AVG_DLY);
}
/* Disable backpressure on queued buffers which is aura in 78xx*/
/*
* Assumption is that all packets from all interface and ports goes
* in same poolx/aurax for backward compatibility
*/
aura = cvmx_fpa_get_packet_pool();
buf_cnt = cvmx_fpa_get_packet_pool_buffer_count();
pass_thresh = buf_cnt - pass_thresh;
drop_thresh = buf_cnt - drop_thresh;
/* Map aura to bpid 0*/
bpid = 0;
cvmx_pki_write_aura_bpid(node, aura, bpid);
/* Don't enable back pressure */
ena_bp = 0;
/* enable RED */
ena_red = 1;
/*
* This will enable RED on all interfaces since
* they all have packet buffer coming from same aura
*/
cvmx_helper_setup_aura_qos(node, aura, ena_red, ena_drop, pass_thresh,
drop_thresh, ena_bp, 0);
}
/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @param pass_thresh
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @param drop_thresh
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* Return: Zero on success. Negative on failure
*/
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
if (octeon_has_feature(OCTEON_FEATURE_PKI))
cvmx_helper_setup_legacy_red(pass_thresh, drop_thresh);
else
cvmx_ipd_setup_red(pass_thresh, drop_thresh);
return 0;
}
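/*
 * Illustrative sketch only, not part of the original sources: the RED
 * thresholds are expressed as "free buffers remaining in FPA pool 0", so a
 * common pattern is to derive them from the packet pool size. The 1/4 and
 * 1/8 fractions below are arbitrary example values, not recommendations.
 */
static void example_enable_red(void)
{
	int buf_cnt = cvmx_fpa_get_packet_pool_buffer_count();
	/* Start dropping at 25% of buffers free, drop all at 12.5% free */
	cvmx_helper_setup_red(buf_cnt / 4, buf_cnt / 8);
}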
/**
* @INTERNAL
* Setup the common GMX settings that determine the number of
@ -979,35 +796,6 @@ int cvmx_helper_get_bpid(int interface, int port)
return CVMX_INVALID_BPID;
}
/**
* Display interface statistics.
*
* @param port IPD/PKO port number
*
* Return: none
*/
void cvmx_helper_show_stats(int port)
{
cvmx_pip_port_status_t status;
cvmx_pko_port_status_t pko_status;
/* ILK stats */
if (octeon_has_feature(OCTEON_FEATURE_ILK))
__cvmx_helper_ilk_show_stats();
/* PIP stats */
cvmx_pip_get_port_stats(port, 0, &status);
debug("port %d: the number of packets - ipd: %d\n", port,
(int)status.packets);
/* PKO stats */
cvmx_pko_get_port_status(port, 0, &pko_status);
debug("port %d: the number of packets - pko: %d\n", port,
(int)pko_status.packets);
/* TODO: other stats */
}
/**
* Returns the interface number for an IPD/PKO port number.
*
@ -1187,39 +975,3 @@ int cvmx_helper_get_interface_index_num(int ipd_port)
return -1;
}
/**
* Prints out a buffer with the address, hex bytes, and ASCII
*
* @param addr Start address to print on the left
* @param[in] buffer array of bytes to print
* @param count Number of bytes to print
*/
void cvmx_print_buffer_u8(unsigned int addr, const uint8_t *buffer,
size_t count)
{
uint i;
while (count) {
unsigned int linelen = count < 16 ? count : 16;
debug("%08x:", addr);
for (i = 0; i < linelen; i++)
debug(" %0*x", 2, buffer[i]);
while (i++ < 17)
debug(" ");
for (i = 0; i < linelen; i++) {
if (buffer[i] >= 0x20 && buffer[i] < 0x7f)
debug("%c", buffer[i]);
else
debug(".");
}
debug("\n");
addr += linelen;
buffer += linelen;
count -= linelen;
}
}


@ -0,0 +1,518 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for XAUI initialization, configuration,
* and monitoring.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
int __cvmx_helper_xaui_enumerate(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
union cvmx_gmxx_hg2_control gmx_hg2_control;
if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
enum cvmx_qlm_mode qlm_mode =
cvmx_qlm_get_dlm_mode(0, interface);
if (qlm_mode == CVMX_QLM_MODE_RXAUI)
return 1;
return 0;
}
/* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
gmx_hg2_control.u64 = csr_rd(CVMX_GMXX_HG2_CONTROL(interface));
if (gmx_hg2_control.s.hg2tx_en)
return 16;
else
return 1;
}
/**
* @INTERNAL
* Probe a XAUI interface and determine the number of ports
* connected to it. The XAUI interface should still be down
* after this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_xaui_probe(int xiface)
{
int i, ports;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
union cvmx_gmxx_inf_mode mode;
/*
* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis
* be programmed.
*/
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
union cvmx_ciu_qlm2 ciu_qlm;
ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 0x5;
ciu_qlm.s.txmargin = 0x1a;
csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
}
/*
* CN63XX Pass 2.x errata G-15273 requires the QLM De-emphasis
	 * be programmed when using a 156.25 MHz ref clock.
*/
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
/* Read the QLM speed pins */
union cvmx_mio_rst_boot mio_rst_boot;
mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);
if (mio_rst_boot.cn63xx.qlm2_spd == 0xb) {
union cvmx_ciu_qlm2 ciu_qlm;
ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 0xa;
ciu_qlm.s.txmargin = 0x1f;
csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
}
}
/*
* Check if QLM is configured correct for XAUI/RXAUI, verify
* the speed as well as mode.
*/
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
int qlm = cvmx_qlm_interface(xiface);
enum cvmx_qlm_mode mode = cvmx_qlm_get_mode(qlm);
if (mode != CVMX_QLM_MODE_XAUI && mode != CVMX_QLM_MODE_RXAUI)
return 0;
}
ports = __cvmx_helper_xaui_enumerate(xiface);
if (ports <= 0)
return 0;
/*
* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
* interface needs to be enabled before IPD otherwise per port
* backpressure may not work properly.
*/
mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
mode.s.en = 1;
csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);
if (!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
!OCTEON_IS_MODEL(OCTEON_CN70XX)) {
/*
* Setup PKO to support 16 ports for HiGig2 virtual
* ports. We're pointing all of the PKO packet ports
* for this interface to the XAUI. This allows us to
* use HiGig2 backpressure per port.
*/
for (i = 0; i < 16; i++) {
union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
pko_mem_port_ptrs.u64 = 0;
/*
* We set each PKO port to have equal priority
* in a round robin fashion.
*/
pko_mem_port_ptrs.s.static_p = 0;
pko_mem_port_ptrs.s.qos_mask = 0xff;
/* All PKO ports map to the same XAUI hardware port */
pko_mem_port_ptrs.s.eid = interface * 4;
pko_mem_port_ptrs.s.pid = interface * 16 + i;
pko_mem_port_ptrs.s.bp_port = interface * 16 + i;
csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
}
}
return ports;
}
/**
* @INTERNAL
* Bringup XAUI interface. After this call packet I/O should be
* fully functional.
*
 * @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_xaui_link_init(int interface)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
union cvmx_pcsxx_control1_reg xaui_ctl;
union cvmx_pcsxx_misc_ctl_reg misc_ctl;
union cvmx_gmxx_tx_xaui_ctl tx_ctl;
/* (1) Interface has already been enabled. */
/* (2) Disable GMX. */
misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
misc_ctl.s.gmxeno = 1;
csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);
/* (3) Disable GMX and PCSX interrupts. */
csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
/* (4) Bring up the PCSX and GMX reconciliation layer. */
/* (4)a Set polarity and lane swapping. */
/* (4)b */
tx_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
/* Enable better IFG packing and improves performance */
tx_ctl.s.dic_en = 1;
tx_ctl.s.uni_en = 0;
csr_wr(CVMX_GMXX_TX_XAUI_CTL(interface), tx_ctl.u64);
	/* (4)c Apply reset sequence */
xaui_ctl.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
xaui_ctl.s.lo_pwr = 0;
/*
* Errata G-15618 requires disabling PCS soft reset in some
* OCTEON II models.
*/
if (!OCTEON_IS_MODEL(OCTEON_CN63XX) &&
!OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) &&
!OCTEON_IS_MODEL(OCTEON_CN68XX))
xaui_ctl.s.reset = 1;
csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), xaui_ctl.u64);
if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) && interface != 1) {
/*
* Note that GMX 1 was skipped as GMX0 is on the same
* QLM and will always be done first
*
* Workaround for Errata (G-16467).
*/
int qlm = interface;
#ifdef CVMX_QLM_DUMP_STATE
debug("%s:%d: XAUI%d: Applying workaround for Errata G-16467\n",
__func__, __LINE__, qlm);
cvmx_qlm_display_registers(qlm);
debug("\n");
#endif
/*
* This workaround only applies to QLMs running XAUI
		 * at 6.25 GHz
*/
if ((cvmx_qlm_get_gbaud_mhz(qlm) == 6250) &&
(cvmx_qlm_jtag_get(qlm, 0, "clkf_byp") != 20)) {
			/* Wait 100us for links to stabilize */
udelay(100);
cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 20);
/* Allow the QLM to exit reset */
cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 0);
			/* Wait 100us for links to stabilize */
udelay(100);
/* Allow TX on QLM */
cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 0);
}
#ifdef CVMX_QLM_DUMP_STATE
debug("%s:%d: XAUI%d: Done applying workaround for Errata G-16467\n",
__func__, __LINE__, qlm);
cvmx_qlm_display_registers(qlm);
debug("\n\n");
#endif
}
/* Wait for PCS to come out of reset */
if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_CONTROL1_REG(interface),
cvmx_pcsxx_control1_reg_t, reset, ==, 0,
10000))
return -1;
/* Wait for PCS to be aligned */
if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_10GBX_STATUS_REG(interface),
cvmx_pcsxx_10gbx_status_reg_t, alignd, ==, 1,
10000))
return -1;
/* Wait for RX to be ready */
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_RX_XAUI_CTL(interface),
cvmx_gmxx_rx_xaui_ctl_t, status, ==, 0,
10000))
return -1;
/* (6) Configure GMX */
/* Wait for GMX RX to be idle */
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000))
return -1;
/* Wait for GMX TX to be idle */
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
return -1;
/* GMX configure */
gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
gmx_cfg.s.speed = 1;
gmx_cfg.s.speed_msb = 0;
gmx_cfg.s.slottime = 1;
csr_wr(CVMX_GMXX_TX_PRTS(interface), 1);
csr_wr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
csr_wr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
/* Wait for receive link */
if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS1_REG(interface),
cvmx_pcsxx_status1_reg_t, rcv_lnk, ==, 1,
10000))
return -1;
if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
cvmx_pcsxx_status2_reg_t, xmtflt, ==, 0,
10000))
return -1;
if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
cvmx_pcsxx_status2_reg_t, rcvflt, ==, 0,
10000))
return -1;
/* (8) Enable packet reception */
misc_ctl.s.gmxeno = 0;
csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);
/* Clear all error interrupts before enabling the interface. */
csr_wr(CVMX_GMXX_RXX_INT_REG(0, interface), ~0x0ull);
csr_wr(CVMX_GMXX_TX_INT_REG(interface), ~0x0ull);
csr_wr(CVMX_PCSXX_INT_REG(interface), ~0x0ull);
/* Enable GMX */
gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
gmx_cfg.s.en = 1;
csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
return 0;
}
/**
* @INTERNAL
* Bringup and enable a XAUI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_xaui_enable(int xiface)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int interface = xi.interface;
__cvmx_helper_setup_gmx(interface, 1);
/* Setup PKND and BPID */
if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
union cvmx_gmxx_bpid_msk bpid_msk;
union cvmx_gmxx_bpid_mapx bpid_map;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
union cvmx_gmxx_txx_append gmxx_txx_append_cfg;
/* Setup PKIND */
gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, 0);
csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmxx_prtx_cfg.u64);
/* Setup BPID */
bpid_map.u64 = csr_rd(CVMX_GMXX_BPID_MAPX(0, interface));
bpid_map.s.val = 1;
bpid_map.s.bpid = cvmx_helper_get_bpid(interface, 0);
csr_wr(CVMX_GMXX_BPID_MAPX(0, interface), bpid_map.u64);
bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
bpid_msk.s.msk_or |= 1;
bpid_msk.s.msk_and &= ~1;
csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
/* CN68XX adds the padding and FCS in PKO, not GMX */
gmxx_txx_append_cfg.u64 =
csr_rd(CVMX_GMXX_TXX_APPEND(0, interface));
gmxx_txx_append_cfg.s.fcs = 0;
gmxx_txx_append_cfg.s.pad = 0;
csr_wr(CVMX_GMXX_TXX_APPEND(0, interface),
gmxx_txx_append_cfg.u64);
}
	/* 70XX eval boards use Marvell PHY, set disparity accordingly. */
if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
union cvmx_gmxx_rxaui_ctl rxaui_ctl;
rxaui_ctl.u64 = csr_rd(CVMX_GMXX_RXAUI_CTL(interface));
rxaui_ctl.s.disparity = 1;
csr_wr(CVMX_GMXX_RXAUI_CTL(interface), rxaui_ctl.u64);
}
__cvmx_helper_xaui_link_init(interface);
return 0;
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
cvmx_helper_link_info_t result;
gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));
pcsxx_status1_reg.u64 = csr_rd(CVMX_PCSXX_STATUS1_REG(interface));
result.u64 = 0;
/* Only return a link if both RX and TX are happy */
if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0 &&
pcsxx_status1_reg.s.rcv_lnk == 1) {
union cvmx_pcsxx_misc_ctl_reg misc_ctl;
result.s.link_up = 1;
result.s.full_duplex = 1;
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
union cvmx_mio_qlmx_cfg qlm_cfg;
int lanes;
int qlm = (interface == 1) ? 0 : interface;
qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
lanes = (qlm_cfg.s.qlm_cfg == 7) ? 2 : 4;
result.s.speed *= lanes;
} else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
int qlm = cvmx_qlm_interface(interface);
result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
result.s.speed *= 4;
} else {
result.s.speed = 10000;
}
misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
if (misc_ctl.s.gmxeno)
__cvmx_helper_xaui_link_init(interface);
} else {
/* Disable GMX and PCSX interrupts. */
csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
}
return result;
}
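/*
 * Illustrative sketch only, not from the original sources: because the
 * link_get helper above already re-runs __cvmx_helper_xaui_link_init() when
 * it finds GMX disabled while the PCS lanes are aligned, a caller usually
 * just polls it periodically and reports the result. The function name and
 * port number here are hypothetical.
 */
static void example_xaui_link_report(int ipd_port)
{
	cvmx_helper_link_info_t link = __cvmx_helper_xaui_link_get(ipd_port);
	if (link.s.link_up)
		debug("XAUI port %d: up at %u Mbps\n", ipd_port, link.s.speed);
	else
		debug("XAUI port %d: down\n", ipd_port);
}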
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));
/* If the link shouldn't be up, then just return */
if (!link_info.s.link_up)
return 0;
/* Do nothing if both RX and TX are happy */
if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0)
return 0;
/* Bring the link up */
return __cvmx_helper_xaui_link_init(interface);
}
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent back out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
int enable_internal,
int enable_external)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
/* Set the internal loop */
pcsxx_control1_reg.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
pcsxx_control1_reg.s.loopbck1 = enable_internal;
csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), pcsxx_control1_reg.u64);
/* Set the external loop */
gmxx_xaui_ext_loopback.u64 =
csr_rd(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
gmxx_xaui_ext_loopback.s.en = enable_external;
csr_wr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
gmxx_xaui_ext_loopback.u64);
/* Take the link through a reset */
return __cvmx_helper_xaui_link_init(interface);
}


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Marvell International Ltd.
* Copyright (C) 2020-2022 Marvell International Ltd.
*
* Helper functions for common, but complicated tasks.
*/
@ -302,20 +302,6 @@ static const struct iface_ops iface_ops_npi = {
.enable = __cvmx_helper_npi_enable,
};
/**
* @INTERNAL
* This structure specifies the interface methods used by interfaces
* configured as srio.
*/
static const struct iface_ops iface_ops_srio = {
.mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
.enumerate = __cvmx_helper_srio_probe,
.probe = __cvmx_helper_srio_probe,
.enable = __cvmx_helper_srio_enable,
.link_get = __cvmx_helper_srio_link_get,
.link_set = __cvmx_helper_srio_link_set,
};
/**
* @INTERNAL
* This structure specifies the interface methods used by interfaces
@ -442,35 +428,6 @@ int __cvmx_helper_init_interface(int xiface, int num_ipd_ports, int has_fcs,
return 0;
}
/*
* Shut down the interfaces; free the resources.
* @INTERNAL
*/
void __cvmx_helper_shutdown_interfaces_node(unsigned int node)
{
int i;
int nifaces; /* number of interfaces */
struct cvmx_iface *piface;
nifaces = cvmx_helper_get_number_of_interfaces();
for (i = 0; i < nifaces; i++) {
piface = &cvmx_interfaces[node][i];
/*
* For SE apps, bootmem was meant to be allocated and never
* freed.
*/
piface->cvif_ipd_port_link_info = 0;
}
}
void __cvmx_helper_shutdown_interfaces(void)
{
unsigned int node = cvmx_get_node_num();
__cvmx_helper_shutdown_interfaces_node(node);
}
int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
@ -525,27 +482,12 @@ u64 cvmx_rgmii_backpressure_dis = 1;
typedef int (*cvmx_export_config_t)(void);
cvmx_export_config_t cvmx_export_app_config;
void cvmx_rgmii_set_back_pressure(uint64_t backpressure_dis)
{
cvmx_rgmii_backpressure_dis = backpressure_dis;
}
/*
* internal functions that are not exported in the .h file but must be
* declared to make gcc happy.
*/
extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);
/**
* cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
* It is meant to allow customization of interfaces which do not have a PHY.
*
* @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
*
* If this function pointer is NULL then it defaults to the MAC.
*/
int (*cvmx_override_iface_phy_mode)(int interface, int index);
/**
* cvmx_override_ipd_port_setup(int ipd_port) is a function
* pointer. It is meant to allow customization of the IPD
@ -607,7 +549,7 @@ int __cvmx_helper_early_ports_on_interface(int interface)
* chip and configuration, this can be 1-16. A value of 0
* specifies that the interface doesn't exist or isn't usable.
*
* @param xiface xiface to get the port count for
* @param xiface to get the port count for
*
* Return: Number of ports on interface. Can be Zero.
*/
@ -919,15 +861,9 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cnf75xx(int xiface)
break;
}
} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
cvmx_sriox_status_reg_t sriox_status_reg;
int srio_port = interface - 1;
sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));
if (sriox_status_reg.s.srio)
iface_ops[interface] = &iface_ops_srio;
else
iface_ops[interface] = &iface_ops_dis;
/* SRIO is disabled for now */
printf("SRIO disabled for now!\n");
iface_ops[interface] = &iface_ops_dis;
} else if (interface == 3) { /* DPI */
iface_ops[interface] = &iface_ops_npi;
} else if (interface == 4) { /* LOOP */
@ -1046,7 +982,6 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
(OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
interface <= 7)) {
/* Only present in CN63XX & CN66XX Octeon model */
union cvmx_sriox_status_reg sriox_status_reg;
/* cn66xx pass1.0 has only 2 SRIO interfaces. */
if ((interface == 5 || interface == 7) &&
@ -1059,12 +994,9 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
*/
iface_ops[interface] = &iface_ops_dis;
} else {
sriox_status_reg.u64 =
csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
if (sriox_status_reg.s.srio)
iface_ops[interface] = &iface_ops_srio;
else
iface_ops[interface] = &iface_ops_dis;
/* SRIO is disabled for now */
printf("SRIO disabled for now!\n");
iface_ops[interface] = &iface_ops_dis;
}
} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
union cvmx_mio_qlmx_cfg mio_qlm_cfg;
@ -1438,16 +1370,6 @@ static int __cvmx_helper_global_setup_backpressure(int node)
return 0;
}
/**
* @INTERNAL
* Verify the per port IPD backpressure is aligned properly.
* Return: Zero if working, non zero if misaligned
*/
int __cvmx_helper_backpressure_is_misaligned(void)
{
return 0;
}
/**
* @INTERNAL
* Enable packet input/output from the hardware. This function is
@ -1467,7 +1389,7 @@ int __cvmx_helper_packet_hardware_enable(int xiface)
if (iface_node_ops[xi.node][xi.interface]->enable)
result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
result |= __cvmx_helper_board_hardware_enable(xiface);
return result;
}
@ -1609,7 +1531,8 @@ int cvmx_helper_initialize_packet_io_node(unsigned int node)
/* Skip invalid/disabled interfaces */
if (cvmx_helper_ports_on_interface(xiface) <= 0)
continue;
printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
debug("Node %d Interface %d has %d ports (%s)\n",
node, interface,
cvmx_helper_ports_on_interface(xiface),
cvmx_helper_interface_mode_to_string(
cvmx_helper_interface_get_mode(xiface)));
@ -1730,445 +1653,6 @@ int cvmx_agl_set_backpressure_override(u32 interface, uint32_t port_mask)
return 0;
}
/**
* Helper function for global packet IO shutdown
*/
int cvmx_helper_shutdown_packet_io_global_cn78xx(int node)
{
int num_interfaces = cvmx_helper_get_number_of_interfaces();
cvmx_wqe_t *work;
int interface;
int result = 0;
/* Shut down all interfaces and disable TX and RX on all ports */
for (interface = 0; interface < num_interfaces; interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
int index;
int num_ports = cvmx_helper_ports_on_interface(xiface);
if (num_ports > 4)
num_ports = 4;
cvmx_bgx_set_backpressure_override(xiface, 0);
for (index = 0; index < num_ports; index++) {
cvmx_helper_link_info_t link_info;
if (!cvmx_helper_is_port_valid(xiface, index))
continue;
cvmx_helper_bgx_shutdown_port(xiface, index);
/* Turn off link LEDs */
link_info.u64 = 0;
cvmx_helper_update_link_led(xiface, index, link_info);
}
}
/* Stop input first */
cvmx_helper_pki_shutdown(node);
/* Retrieve all packets from the SSO and free them */
result = 0;
while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
cvmx_helper_free_pki_pkt_data(work);
cvmx_wqe_pki_free(work);
result++;
}
if (result > 0)
debug("%s: Purged %d packets from SSO\n", __func__, result);
/*
* No need to wait for PKO queues to drain,
* dq_close() drains the queues to NULL.
*/
/* Shutdown PKO interfaces */
for (interface = 0; interface < num_interfaces; interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
cvmx_helper_pko3_shut_interface(xiface);
}
/* Disable MAC address filtering */
for (interface = 0; interface < num_interfaces; interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
switch (cvmx_helper_interface_get_mode(xiface)) {
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
case CVMX_HELPER_INTERFACE_MODE_XLAUI:
case CVMX_HELPER_INTERFACE_MODE_XFI:
case CVMX_HELPER_INTERFACE_MODE_10G_KR:
case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_MIXED: {
int index;
int num_ports = cvmx_helper_ports_on_interface(xiface);
for (index = 0; index < num_ports; index++) {
if (!cvmx_helper_is_port_valid(xiface, index))
continue;
/* Reset MAC filtering */
cvmx_helper_bgx_rx_adr_ctl(node, interface, index, 0, 0, 0);
}
break;
}
default:
break;
}
}
for (interface = 0; interface < num_interfaces; interface++) {
int index;
int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
int num_ports = cvmx_helper_ports_on_interface(xiface);
for (index = 0; index < num_ports; index++) {
/* Doing this twice should clear it since no packets
* can be received.
*/
cvmx_update_rx_activity_led(xiface, index, false);
cvmx_update_rx_activity_led(xiface, index, false);
}
}
/* Shutdown the PKO unit */
result = cvmx_helper_pko3_shutdown(node);
/* Release interface structures */
__cvmx_helper_shutdown_interfaces();
return result;
}
/**
* Undo the initialization performed in
* cvmx_helper_initialize_packet_io_global(). After calling this routine and the
* local version on each core, packet IO for Octeon will be disabled and placed
 * in the initial reset state. It will then be safe to call the initialization
 * functions again later on. Note that this routine does not empty the FPA pools. It frees all
* buffers used by the packet IO hardware to the FPA so a function emptying the
* FPA after shutdown should find all packet buffers in the FPA.
*
* Return: Zero on success, negative on failure.
*/
int cvmx_helper_shutdown_packet_io_global(void)
{
const int timeout = 5; /* Wait up to 5 seconds for timeouts */
int result = 0;
int num_interfaces = cvmx_helper_get_number_of_interfaces();
int interface;
int num_ports;
int index;
struct cvmx_buffer_list *pool0_buffers;
struct cvmx_buffer_list *pool0_buffers_tail;
cvmx_wqe_t *work;
union cvmx_ipd_ctl_status ipd_ctl_status;
int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
int node = cvmx_get_node_num();
cvmx_pcsx_mrx_control_reg_t control_reg;
if (octeon_has_feature(OCTEON_FEATURE_BGX))
return cvmx_helper_shutdown_packet_io_global_cn78xx(node);
/* Step 1: Disable all backpressure */
for (interface = 0; interface < num_interfaces; interface++) {
cvmx_helper_interface_mode_t mode =
cvmx_helper_interface_get_mode(interface);
if (mode == CVMX_HELPER_INTERFACE_MODE_AGL)
cvmx_agl_set_backpressure_override(interface, 0x1);
else if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
cvmx_gmx_set_backpressure_override(interface, 0xf);
}
/* Step 2: Wait for the PKO queues to drain */
result = __cvmx_helper_pko_drain();
if (result < 0) {
debug("WARNING: %s: Failed to drain some PKO queues\n",
__func__);
}
/* Step 3: Disable TX and RX on all ports */
for (interface = 0; interface < num_interfaces; interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(node,
interface);
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Not a packet interface */
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_SRIO:
case CVMX_HELPER_INTERFACE_MODE_ILK:
/*
* We don't handle the NPI/NPEI/SRIO packet
* engines. The caller must know these are
* idle.
*/
break;
case CVMX_HELPER_INTERFACE_MODE_LOOP:
/*
* Nothing needed. Once PKO is idle, the
* loopback devices must be idle.
*/
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
/*
* SPI cannot be disabled from Octeon. It is
* the responsibility of the caller to make
* sure SPI is idle before doing shutdown.
*
* Fall through and do the same processing as
* RGMII/GMII.
*/
fallthrough;
case CVMX_HELPER_INTERFACE_MODE_GMII:
case CVMX_HELPER_INTERFACE_MODE_RGMII:
/* Disable outermost RX at the ASX block */
csr_wr(CVMX_ASXX_RX_PRT_EN(interface), 0);
num_ports = cvmx_helper_ports_on_interface(xiface);
if (num_ports > 4)
num_ports = 4;
for (index = 0; index < num_ports; index++) {
union cvmx_gmxx_prtx_cfg gmx_cfg;
if (!cvmx_helper_is_port_valid(interface, index))
continue;
gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 0;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
/* Poll the GMX state machine waiting for it to become idle */
csr_wr(CVMX_NPI_DBG_SELECT,
interface * 0x800 + index * 0x100 + 0x880);
if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
data & 7, ==, 0, timeout * 1000000)) {
debug("GMX RX path timeout waiting for idle\n");
result = -1;
}
if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
data & 0xf, ==, 0, timeout * 1000000)) {
debug("GMX TX path timeout waiting for idle\n");
result = -1;
}
}
/* Disable outermost TX at the ASX block */
csr_wr(CVMX_ASXX_TX_PRT_EN(interface), 0);
/* Disable interrupts for interface */
csr_wr(CVMX_ASXX_INT_EN(interface), 0);
csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0);
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_QSGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
num_ports = cvmx_helper_ports_on_interface(xiface);
if (num_ports > 4)
num_ports = 4;
for (index = 0; index < num_ports; index++) {
union cvmx_gmxx_prtx_cfg gmx_cfg;
if (!cvmx_helper_is_port_valid(interface, index))
continue;
gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 0;
csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
union cvmx_gmxx_prtx_cfg, rx_idle, ==, 1,
timeout * 1000000)) {
debug("GMX RX path timeout waiting for idle\n");
result = -1;
}
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
timeout * 1000000)) {
debug("GMX TX path timeout waiting for idle\n");
result = -1;
}
/* For SGMII some PHYs require that the PCS
* interface be powered down and reset (i.e.
* Atheros/Qualcomm PHYs).
*/
if (cvmx_helper_interface_get_mode(interface) ==
CVMX_HELPER_INTERFACE_MODE_SGMII) {
u64 reg;
reg = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
/* Power down the interface */
control_reg.u64 = csr_rd(reg);
control_reg.s.pwr_dn = 1;
csr_wr(reg, control_reg.u64);
csr_rd(reg);
}
}
break;
case CVMX_HELPER_INTERFACE_MODE_AGL: {
int port = cvmx_helper_agl_get_port(interface);
union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;
agl_gmx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
agl_gmx_cfg.s.en = 0;
csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
union cvmx_agl_gmx_prtx_cfg, rx_idle, ==, 1,
timeout * 1000000)) {
debug("AGL RX path timeout waiting for idle\n");
result = -1;
}
if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
union cvmx_agl_gmx_prtx_cfg, tx_idle, ==, 1,
timeout * 1000000)) {
debug("AGL TX path timeout waiting for idle\n");
result = -1;
}
} break;
default:
break;
}
}
/* Step 4: Retrieve all packets from the POW and free them */
while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
cvmx_helper_free_packet_data(work);
cvmx_fpa1_free(work, wqe_pool, 0);
}
/* Step 5 */
cvmx_ipd_disable();
/*
* Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
* have not been reset yet
*/
__cvmx_ipd_free_ptr();
/* Step 7: Free the PKO command buffers and put PKO in reset */
cvmx_pko_shutdown();
/* Step 8: Disable MAC address filtering */
for (interface = 0; interface < num_interfaces; interface++) {
int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_SRIO:
case CVMX_HELPER_INTERFACE_MODE_ILK:
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
case CVMX_HELPER_INTERFACE_MODE_GMII:
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_SPI:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_QSGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
num_ports = cvmx_helper_ports_on_interface(xiface);
if (num_ports > 4)
num_ports = 4;
for (index = 0; index < num_ports; index++) {
if (!cvmx_helper_is_port_valid(interface, index))
continue;
csr_wr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
csr_wr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
csr_wr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
csr_wr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
csr_wr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
csr_wr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
csr_wr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
csr_wr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
}
break;
case CVMX_HELPER_INTERFACE_MODE_AGL: {
int port = cvmx_helper_agl_get_port(interface);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CTL(port), 1);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), 0);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), 0);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), 0);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), 0);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), 0);
csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), 0);
} break;
default:
break;
}
}
/*
* Step 9: Drain all FPA buffers out of pool 0 before we reset
* IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
* sync. We temporarily keep the buffers in the pool0_buffers
* list.
*/
pool0_buffers = NULL;
pool0_buffers_tail = NULL;
while (1) {
struct cvmx_buffer_list *buffer = cvmx_fpa1_alloc(0);
if (buffer) {
buffer->next = NULL;
if (!pool0_buffers)
pool0_buffers = buffer;
else
pool0_buffers_tail->next = buffer;
pool0_buffers_tail = buffer;
} else {
break;
}
}
/* Step 10: Reset IPD and PIP */
ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
ipd_ctl_status.s.reset = 1;
csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
/* Make sure IPD has finished reset. */
if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, union cvmx_ipd_ctl_status, rst_done,
==, 0, 1000)) {
debug("IPD reset timeout waiting for idle\n");
result = -1;
}
}
/* Step 11: Restore the FPA buffers into pool 0 */
while (pool0_buffers) {
struct cvmx_buffer_list *n = pool0_buffers->next;
cvmx_fpa1_free(pool0_buffers, 0, 0);
pool0_buffers = n;
}
/* Step 12: Release interface structures */
__cvmx_helper_shutdown_interfaces();
return result;
}
/**
 * Does core-local shutdown of packet I/O
*
* Return: Zero on success, non-zero on failure
*/
int cvmx_helper_shutdown_packet_io_local(void)
{
/*
* Currently there is nothing to do per core. This may change
* in the future.
*/
return 0;
}
/**
* Auto configure an IPD/PKO port link state and speed. This
* function basically does the equivalent of:
@ -2290,50 +1774,6 @@ int cvmx_helper_link_set(int xipd_port, cvmx_helper_link_info_t link_info)
return result;
}
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent back out again.
*
* @param xipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* Return: Zero on success, negative on failure.
*/
int cvmx_helper_configure_loopback(int xipd_port, int enable_internal, int enable_external)
{
int result = -1;
int xiface = cvmx_helper_get_interface_num(xipd_port);
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
int index = cvmx_helper_get_interface_index_num(xipd_port);
if (index >= cvmx_helper_ports_on_interface(xiface))
return -1;
cvmx_helper_interface_get_mode(xiface);
if (iface_node_ops[xi.node][xi.interface]->loopback)
result = iface_node_ops[xi.node][xi.interface]->loopback(xipd_port, enable_internal,
enable_external);
return result;
}
void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers, int pko_buffers)
{
if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
cvmx_helper_pki_set_dflt_pool_buffer(node, num_packet_buffers);
cvmx_helper_pki_set_dflt_aura_buffer(node, num_packet_buffers);
} else {
cvmx_ipd_set_packet_pool_buffer_count(num_packet_buffers);
cvmx_ipd_set_wqe_pool_buffer_count(num_packet_buffers);
cvmx_pko_set_cmd_queue_pool_buffer_count(pko_buffers);
}
}
void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
{
s64 paddr;
@ -2346,266 +1786,3 @@ void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
}
return cvmx_phys_to_ptr(paddr);
}
void cvmx_helper_mem_free(void *buffer, uint64_t size)
{
__cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buffer), size, 0);
}
int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg)
{
int i;
memset(qos_cfg, 0, sizeof(cvmx_qos_config_t));
qos_cfg->pkt_mode = CVMX_QOS_PKT_MODE_HWONLY; /* Process PAUSEs in hardware only.*/
qos_cfg->pool_mode = CVMX_QOS_POOL_PER_PORT; /* One Pool per BGX:LMAC.*/
qos_cfg->pktbuf_size = 2048; /* Fit WQE + MTU in one buffer.*/
qos_cfg->aura_size = 1024; /* 1K buffers typically enough for any application.*/
qos_cfg->pko_pfc_en = 1; /* Enable PKO layout for PFC feature. */
qos_cfg->vlan_num = 1; /* For Stacked VLAN, use 2nd VLAN in the QPG algorithm.*/
qos_cfg->qos_proto = qos_proto; /* Use PFC flow-control protocol.*/
qos_cfg->qpg_base = -1; /* QPG Table index is undefined.*/
qos_cfg->p_time = 0x60; /* PAUSE packets time window.*/
qos_cfg->p_interval = 0x10; /* PAUSE packets interval.*/
for (i = 0; i < CVMX_QOS_NUM; i++) {
qos_cfg->groups[i] = i; /* SSO Groups = 0...7 */
qos_cfg->group_prio[i] = i; /* SSO Group priority = QOS. */
qos_cfg->drop_thresh[i] = 99; /* 99% of the Aura size.*/
qos_cfg->red_thresh[i] = 90; /* 90% of the Aura size.*/
qos_cfg->bp_thresh[i] = 70; /* 70% of the Aura size.*/
}
return 0;
}
int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg)
{
cvmx_user_static_pko_queue_config_t pkocfg;
cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
int xiface = cvmx_helper_get_interface_num(xipdport);
cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
/* Configure PKO port for PFC SQ layout: */
cvmx_helper_pko_queue_config_get(xp.node, &pkocfg);
pkocfg.pknd.pko_cfg_iface[xi.interface].pfc_enable = 1;
cvmx_helper_pko_queue_config_set(xp.node, &pkocfg);
return 0;
}
int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
{
const int channles = CVMX_QOS_NUM;
int bufsize = qos_cfg->pktbuf_size;
int aura_size = qos_cfg->aura_size;
cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
int node = xp.node;
int ipdport = xp.port;
int port = cvmx_helper_get_interface_index_num(xp.port);
int xiface = cvmx_helper_get_interface_num(xipdport);
cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
cvmx_fpa3_pool_t gpool;
cvmx_fpa3_gaura_t gaura;
cvmx_bgxx_cmr_rx_ovr_bp_t ovrbp;
struct cvmx_pki_qpg_config qpgcfg;
struct cvmx_pki_style_config stcfg, stcfg_dflt;
struct cvmx_pki_pkind_config pkcfg;
int chan, bpid, group, qpg;
int bpen, reden, dropen, passthr, dropthr, bpthr;
int nbufs, pkind, style;
char name[32];
if (qos_cfg->pool_mode == CVMX_QOS_POOL_PER_PORT) {
/* Allocate and setup packet Pool: */
nbufs = aura_size * channles;
sprintf(name, "QOS.P%d", ipdport);
gpool = cvmx_fpa3_setup_fill_pool(node, -1 /*auto*/, name, bufsize, nbufs, NULL);
if (!__cvmx_fpa3_pool_valid(gpool)) {
printf("%s: Failed to setup FPA Pool\n", __func__);
return -1;
}
for (chan = 0; chan < channles; chan++)
qos_cfg->gpools[chan] = gpool;
} else {
printf("%s: Invalid pool_mode %d\n", __func__, qos_cfg->pool_mode);
return -1;
}
/* Allocate QPG entries: */
qos_cfg->qpg_base = cvmx_pki_qpg_entry_alloc(node, -1 /*auto*/, channles);
if (qos_cfg->qpg_base < 0) {
printf("%s: Failed to allocate QPG entry\n", __func__);
return -1;
}
for (chan = 0; chan < channles; chan++) {
/* Allocate and setup Aura, setup BP threshold: */
gpool = qos_cfg->gpools[chan];
sprintf(name, "QOS.A%d", ipdport + chan);
gaura = cvmx_fpa3_set_aura_for_pool(gpool, -1 /*auto*/, name, bufsize, aura_size);
if (!__cvmx_fpa3_aura_valid(gaura)) {
printf("%s: Failed to setup FPA Aura for Channel %d\n", __func__, chan);
return -1;
}
qos_cfg->gauras[chan] = gaura;
bpen = 1;
reden = 1;
dropen = 1;
dropthr = (qos_cfg->drop_thresh[chan] * 10 * aura_size) / 1000;
passthr = (qos_cfg->red_thresh[chan] * 10 * aura_size) / 1000;
bpthr = (qos_cfg->bp_thresh[chan] * 10 * aura_size) / 1000;
cvmx_fpa3_setup_aura_qos(gaura, reden, passthr, dropthr, bpen, bpthr);
cvmx_pki_enable_aura_qos(node, gaura.laura, reden, dropen, bpen);
/* Allocate BPID, link Aura and Channel using BPID: */
bpid = cvmx_pki_bpid_alloc(node, -1 /*auto*/);
if (bpid < 0) {
printf("%s: Failed to allocate BPID for channel %d\n",
__func__, chan);
return -1;
}
qos_cfg->bpids[chan] = bpid;
cvmx_pki_write_aura_bpid(node, gaura.laura, bpid);
cvmx_pki_write_channel_bpid(node, ipdport + chan, bpid);
/* Setup QPG entries: */
group = qos_cfg->groups[chan];
qpg = qos_cfg->qpg_base + chan;
cvmx_pki_read_qpg_entry(node, qpg, &qpgcfg);
qpgcfg.port_add = chan;
qpgcfg.aura_num = gaura.laura;
qpgcfg.grp_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
qpgcfg.grp_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
qpgcfg.grptag_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
qpgcfg.grptag_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
cvmx_pki_write_qpg_entry(node, qpg, &qpgcfg);
}
/* Allocate and setup STYLE: */
cvmx_helper_pki_get_dflt_style(node, &stcfg_dflt);
style = cvmx_pki_style_alloc(node, -1 /*auto*/);
cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
stcfg.tag_cfg = stcfg_dflt.tag_cfg;
stcfg.parm_cfg.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
stcfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
stcfg.parm_cfg.qpg_base = qos_cfg->qpg_base;
stcfg.parm_cfg.qpg_port_msb = 0;
stcfg.parm_cfg.qpg_port_sh = 0;
stcfg.parm_cfg.qpg_dis_grptag = 1;
stcfg.parm_cfg.fcs_strip = 1;
stcfg.parm_cfg.mbuff_size = bufsize - 64; /* Do not use 100% of the buffer. */
stcfg.parm_cfg.force_drop = 0;
stcfg.parm_cfg.nodrop = 0;
stcfg.parm_cfg.rawdrp = 0;
stcfg.parm_cfg.cache_mode = 2; /* 1st buffer in L2 */
stcfg.parm_cfg.wqe_vs = qos_cfg->vlan_num;
cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
/* Setup PKIND: */
pkind = cvmx_helper_get_pknd(xiface, port);
cvmx_pki_read_pkind_config(node, pkind, &pkcfg);
pkcfg.cluster_grp = 0; /* OCTEON3 has only one cluster group = 0 */
pkcfg.initial_style = style;
pkcfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
cvmx_pki_write_pkind_config(node, pkind, &pkcfg);
/* Setup parameters of the QOS packet and enable QOS flow-control: */
cvmx_bgx_set_pause_pkt_param(xipdport, 0, 0x0180c2000001, 0x8808, qos_cfg->p_time,
qos_cfg->p_interval);
cvmx_bgx_set_flowctl_mode(xipdport, qos_cfg->qos_proto, qos_cfg->pkt_mode);
/* Enable PKI channel backpressure in the BGX: */
ovrbp.u64 = csr_rd_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface));
ovrbp.s.en &= ~(1 << port);
ovrbp.s.ign_fifo_bp &= ~(1 << port);
csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovrbp.u64);
return 0;
}
int cvmx_helper_qos_sso_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
{
const int channels = CVMX_QOS_NUM;
cvmx_sso_grpx_pri_t grppri;
int chan, qos, group;
cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
int node = xp.node;
for (chan = 0; chan < channels; chan++) {
qos = cvmx_helper_qos2prio(chan);
group = qos_cfg->groups[qos];
grppri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
grppri.s.pri = qos_cfg->group_prio[chan];
csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grppri.u64);
}
return 0;
}
int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen)
{
int n, dpichans;
if ((unsigned int)chan >= CVMX_PKO3_IPD_NUM_MAX) {
printf("%s: Channel %d is out of range (0..4095)\n", __func__, chan);
return -1;
}
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
dpichans = 64;
else
dpichans = 128;
if (chan >= 0 && chan < 64)
n = snprintf(namebuf, buflen, "LBK%d", chan);
else if (chan >= 0x100 && chan < (0x100 + dpichans))
n = snprintf(namebuf, buflen, "DPI%d", chan - 0x100);
else if (chan == 0x200)
n = snprintf(namebuf, buflen, "NQM");
else if (chan >= 0x240 && chan < (0x240 + (1 << 1) + 2))
n = snprintf(namebuf, buflen, "SRIO%d:%d", (chan - 0x240) >> 1,
(chan - 0x240) & 0x1);
else if (chan >= 0x400 && chan < (0x400 + (1 << 8) + 256))
n = snprintf(namebuf, buflen, "ILK%d:%d", (chan - 0x400) >> 8,
(chan - 0x400) & 0xFF);
else if (chan >= 0x800 && chan < (0x800 + (5 << 8) + (3 << 4) + 16))
n = snprintf(namebuf, buflen, "BGX%d:%d:%d", (chan - 0x800) >> 8,
((chan - 0x800) >> 4) & 0x3, (chan - 0x800) & 0xF);
else
n = snprintf(namebuf, buflen, "--");
return n;
}
#ifdef CVMX_DUMP_DIAGNOSTICS
void cvmx_helper_dump_for_diagnostics(int node)
{
if (!(OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))) {
printf("Diagnostics are not implemented for this model\n");
return;
}
#ifdef CVMX_DUMP_GSER
{
int qlm, num_qlms;
num_qlms = cvmx_qlm_get_num();
for (qlm = 0; qlm < num_qlms; qlm++) {
cvmx_dump_gser_config_node(node, qlm);
cvmx_dump_gser_status_node(node, qlm);
}
}
#endif
#ifdef CVMX_DUMP_BGX
{
int bgx;
for (bgx = 0; bgx < CVMX_HELPER_MAX_GMX; bgx++) {
cvmx_dump_bgx_config_node(node, bgx);
cvmx_dump_bgx_status_node(node, bgx);
}
}
#endif
#ifdef CVMX_DUMP_PKI
cvmx_pki_config_dump(node);
cvmx_pki_stats_dump(node);
#endif
#ifdef CVMX_DUMP_PKO
cvmx_helper_pko3_config_dump(node);
cvmx_helper_pko3_stats_dump(node);
#endif
#ifdef CVMX_DUMO_SSO
cvmx_sso_config_dump(node);
#endif
}
#endif

File diff suppressed because it is too large


@ -0,0 +1,149 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* IPD Support.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
cvmx_ipd_config_t cvmx_ipd_cfg = {
.first_mbuf_skip = 184,
.ipd_enable = 1,
.cache_mode = CVMX_IPD_OPC_MODE_STT,
.packet_pool = { 0, 2048, 0 },
.wqe_pool = { 1, 128, 0 },
.port_config = { CVMX_PIP_PORT_CFG_MODE_SKIPL2,
CVMX_POW_TAG_TYPE_ORDERED, CVMX_PIP_TAG_MODE_TUPLE,
.tag_fields = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }
};
#define IPD_RED_AVG_DLY 1000
#define IPD_RED_PRB_DLY 1000
void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip,
u64 not_first_mbuff_skip, u64 first_back, u64 second_back,
u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,
u64 back_pres_enable_flag)
{
cvmx_ipd_1st_mbuff_skip_t first_skip;
cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
cvmx_ipd_packet_mbuff_size_t size;
cvmx_ipd_1st_next_ptr_back_t first_back_struct;
cvmx_ipd_second_next_ptr_back_t second_back_struct;
cvmx_ipd_wqe_fpa_queue_t wqe_pool;
cvmx_ipd_ctl_status_t ipd_ctl_reg;
/* Enforce 1st skip minimum if WQE shares the buffer with packet */
if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
union cvmx_ipd_ctl_status ctl_status;
ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
if (ctl_status.s.no_wptr != 0 && first_mbuff_skip < 16)
first_mbuff_skip = 16;
}
first_skip.u64 = 0;
first_skip.s.skip_sz = first_mbuff_skip;
csr_wr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
not_first_skip.u64 = 0;
not_first_skip.s.skip_sz = not_first_mbuff_skip;
csr_wr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
size.u64 = 0;
size.s.mb_size = mbuff_size;
csr_wr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
first_back_struct.u64 = 0;
first_back_struct.s.back = first_back;
csr_wr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
second_back_struct.u64 = 0;
second_back_struct.s.back = second_back;
csr_wr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
wqe_pool.u64 = 0;
wqe_pool.s.wqe_pool = wqe_fpa_pool;
csr_wr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
ipd_ctl_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
ipd_ctl_reg.s.opc_mode = cache_mode;
ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
/* Note: the example RED code is below */
}
/**
* Enable IPD
*/
void cvmx_ipd_enable(void)
{
cvmx_ipd_ctl_status_t ipd_reg;
ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
/*
	 * Busy-wait for rst_done on CN68XX
*/
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
while (ipd_reg.s.rst_done != 0)
ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
if (ipd_reg.s.ipd_en)
debug("Warning: Enabling IPD when IPD already enabled.\n");
ipd_reg.s.ipd_en = 1;
if (cvmx_ipd_cfg.enable_len_M8_fix)
ipd_reg.s.len_m8 = 1;
csr_wr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
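/*
 * Illustrative sketch only, not part of the original sources: a legacy IPD
 * bring-up typically calls cvmx_ipd_config() with the defaults from
 * cvmx_ipd_cfg above and then cvmx_ipd_enable(). The buffer size and skip
 * values are assumed to be passed in 8-byte words and the "back" arguments
 * in 128-byte cache lines, as in the original SDK helpers; the literal
 * numbers below (2048-byte buffers, 184-byte first skip, WQE pool 1) come
 * from the cvmx_ipd_cfg initializer and are example values only.
 */
static void example_legacy_ipd_setup(void)
{
	cvmx_ipd_config(2048 / 8,		/* packet buffer size, 8-byte words */
			184 / 8,		/* first mbuf skip */
			0,			/* no skip on chained buffers */
			(184 + 8) / 128,	/* first back pointer, cache lines */
			(0 + 8) / 128,		/* second back pointer */
			1,			/* WQE FPA pool */
			CVMX_IPD_OPC_MODE_STT,
			1 /* enable per-port backpressure */);
	cvmx_ipd_enable();
}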


@ -0,0 +1,285 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* PKI Support.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
static s32 cvmx_pki_style_refcnt[CVMX_MAX_NODES][CVMX_PKI_NUM_INTERNAL_STYLE];
/**
* This function allocates/reserves a style from pool of global styles per node.
* @param node node to allocate style from.
* @param style style to allocate, if -1 it will be allocated
* first available style from style resource. If index is positive
* number and in range, it will try to allocate specified style.
* @return style number on success,
* -1 on alloc failure.
* -2 on resource already reserved.
*/
int cvmx_pki_style_alloc(int node, int style)
{
int rs;
if (cvmx_create_global_resource_range(CVMX_GR_TAG_STYLE(node),
CVMX_PKI_NUM_INTERNAL_STYLE)) {
printf("ERROR: Failed to create styles global resource\n");
return -1;
}
if (style >= 0) {
/* Reserving specific style, use refcnt for sharing */
rs = cvmx_atomic_fetch_and_add32(
&cvmx_pki_style_refcnt[node][style], 1);
if (rs > 0)
return CVMX_RESOURCE_ALREADY_RESERVED;
rs = cvmx_reserve_global_resource_range(CVMX_GR_TAG_STYLE(node),
style, style, 1);
if (rs == -1) {
/* This means the style is taken by another app */
printf("ERROR: style %d is reserved by another app\n",
style);
cvmx_atomic_fetch_and_add32(
&cvmx_pki_style_refcnt[node][style], -1);
return CVMX_RESOURCE_ALLOC_FAILED;
}
} else {
/* Allocate first available style */
rs = cvmx_allocate_global_resource_range(
CVMX_GR_TAG_STYLE(node), style, 1, 1);
if (rs < 0) {
printf("ERROR: Failed to allocate style, none available\n");
return CVMX_RESOURCE_ALLOC_FAILED;
}
style = rs;
/* Increment refcnt for newly created style */
cvmx_atomic_fetch_and_add32(&cvmx_pki_style_refcnt[node][style],
1);
}
return style;
}
/**
* This function frees a style from pool of global styles per node.
* @param node node to free style from.
* @param style style to free
 * @return 0 on success, -1 on failure, or, if the style is shared,
 * a positive count of the remaining users of this style.
*/
int cvmx_pki_style_free(int node, int style)
{
int rs;
rs = cvmx_atomic_fetch_and_add32(&cvmx_pki_style_refcnt[node][style],
-1);
if (rs > 1)
return rs - 1;
if (cvmx_free_global_resource_range_with_base(CVMX_GR_TAG_STYLE(node),
style, 1) == -1) {
printf("ERROR Failed to release style %d\n", (int)style);
return -1;
}
return 0;
}
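/*
 * Illustrative usage (editor's example, not part of the original sources):
 * allocate any free style on node 0, use it, then release it:
 *
 *	int style = cvmx_pki_style_alloc(0, -1);
 *
 *	if (style >= 0) {
 *		... configure the style ...
 *		cvmx_pki_style_free(0, style);
 *	}
 */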
/**
 * This function allocates/reserves a cluster group from the per-node
 * cluster group resources.
 * @param node node to allocate cluster group from.
 * @param cl_grp cluster group to allocate/reserve; if -1,
 * allocate any available cluster group.
 * @return cluster group number on success,
 * -1 on alloc failure,
 * -2 if the resource is already reserved.
 */
int cvmx_pki_cluster_grp_alloc(int node, int cl_grp)
{
int rs;
if (node >= CVMX_MAX_NODES) {
printf("ERROR: Invalid node number %d\n", node);
return -1;
}
if (cvmx_create_global_resource_range(CVMX_GR_TAG_CLUSTER_GRP(node),
CVMX_PKI_NUM_CLUSTER_GROUP)) {
printf("ERROR: Failed to create Cluster group global resource\n");
return -1;
}
if (cl_grp >= 0) {
rs = cvmx_reserve_global_resource_range(
CVMX_GR_TAG_CLUSTER_GRP(node), 0, cl_grp, 1);
if (rs == -1) {
debug("INFO: cl_grp %d is already reserved\n",
(int)cl_grp);
return CVMX_RESOURCE_ALREADY_RESERVED;
}
} else {
rs = cvmx_allocate_global_resource_range(
CVMX_GR_TAG_CLUSTER_GRP(node), 0, 1, 1);
if (rs == -1) {
debug("Warning: Failed to alloc cluster grp\n");
return CVMX_RESOURCE_ALLOC_FAILED;
}
}
cl_grp = rs;
return cl_grp;
}
/**
 * This function allocates/reserves a pcam entry on a node.
 * @param node node to allocate pcam entry from.
 * @param index index of pcam entry (0-191); if -1,
 * allocate any available pcam entry.
 * @param bank pcam bank to allocate/reserve the pcam entry from.
 * @param cluster_mask mask of clusters from which the pcam entry is needed.
 * @return pcam entry, or -1 on failure
 */
int cvmx_pki_pcam_entry_alloc(int node, int index, int bank, u64 cluster_mask)
{
int rs = 0;
unsigned int cluster;
for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
if ((cluster_mask & (1 << cluster)) == 0)
continue;
rs = cvmx_create_global_resource_range(
CVMX_GR_TAG_PCAM(node, cluster, bank),
CVMX_PKI_TOTAL_PCAM_ENTRY);
if (rs != 0) {
printf("ERROR: Failed to create pki pcam global resource\n");
return -1;
}
if (index >= 0)
rs = cvmx_reserve_global_resource_range(
CVMX_GR_TAG_PCAM(node, cluster, bank), cluster,
index, 1);
else
rs = cvmx_allocate_global_resource_range(
CVMX_GR_TAG_PCAM(node, cluster, bank), cluster,
1, 1);
if (rs == -1) {
printf("ERROR: PCAM :index %d not available in cluster %d bank %d",
(int)index, (int)cluster, bank);
return -1;
}
} /* for cluster */
index = rs;
	/* TODO: implement cluster handle for pass 2; for now assume
	 * all clusters will have the same base index.
	 */
return index;
}
/**
* This function allocates/reserves QPG table entries per node.
* @param node node number.
 * @param base_offset base_offset in qpg table. If -1, the first available
 * qpg base_offset will be allocated. If base_offset is a non-negative
 * number in range, that specific base_offset is reserved.
 * @param count number of consecutive qpg entries to allocate, starting at
 * base_offset.
* @return qpg table base offset number on success
* -1 on alloc failure.
* -2 on resource already reserved.
*/
int cvmx_pki_qpg_entry_alloc(int node, int base_offset, int count)
{
int rs;
if (cvmx_create_global_resource_range(CVMX_GR_TAG_QPG_ENTRY(node),
CVMX_PKI_NUM_QPG_ENTRY)) {
printf("ERROR: Failed to create qpg_entry global resource\n");
return -1;
}
if (base_offset >= 0) {
rs = cvmx_reserve_global_resource_range(
CVMX_GR_TAG_QPG_ENTRY(node), base_offset, base_offset,
count);
if (rs == -1) {
debug("INFO: qpg entry %d is already reserved\n",
(int)base_offset);
return CVMX_RESOURCE_ALREADY_RESERVED;
}
} else {
rs = cvmx_allocate_global_resource_range(
CVMX_GR_TAG_QPG_ENTRY(node), base_offset, count, 1);
if (rs == -1) {
printf("ERROR: Failed to allocate qpg entry\n");
return CVMX_RESOURCE_ALLOC_FAILED;
}
}
base_offset = rs;
return base_offset;
}
/**
* This function frees QPG table entries per node.
* @param node node number.
 * @param base_offset base_offset in qpg table at which to start freeing
 * entries.
 * @param count number of consecutive qpg entries to free, starting at
 * base_offset.
* @return qpg table base offset number on success, -1 on failure.
*/
int cvmx_pki_qpg_entry_free(int node, int base_offset, int count)
{
if (cvmx_free_global_resource_range_with_base(
CVMX_GR_TAG_QPG_ENTRY(node), base_offset, count) == -1) {
printf("ERROR Failed to release qpg offset %d",
(int)base_offset);
return -1;
}
return 0;
}
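/*
 * Illustrative usage (editor's example, not part of the original sources):
 * reserve eight consecutive QPG entries anywhere in the table on node 0
 * and release them again:
 *
 *	int base = cvmx_pki_qpg_entry_alloc(0, -1, 8);
 *
 *	if (base >= 0)
 *		cvmx_pki_qpg_entry_free(0, base, 8);
 */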
int cvmx_pki_mtag_idx_alloc(int node, int idx)
{
if (cvmx_create_global_resource_range(CVMX_GR_TAG_MTAG_IDX(node),
CVMX_PKI_NUM_MTAG_IDX)) {
printf("ERROR: Failed to create MTAG-IDX global resource\n");
return -1;
}
if (idx >= 0) {
idx = cvmx_reserve_global_resource_range(
CVMX_GR_TAG_MTAG_IDX(node), idx, idx, 1);
if (idx == -1) {
debug("INFO: MTAG index %d is already reserved\n",
(int)idx);
return CVMX_RESOURCE_ALREADY_RESERVED;
}
} else {
idx = cvmx_allocate_global_resource_range(
CVMX_GR_TAG_MTAG_IDX(node), idx, 1, 1);
if (idx == -1) {
printf("ERROR: Failed to allocate MTAG index\n");
return CVMX_RESOURCE_ALLOC_FAILED;
}
}
return idx;
}

View file

@ -0,0 +1,910 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* PKI Support.
*/
#include <time.h>
#include <log.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pki-cluster.h>
#include <mach/cvmx-pki-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
/**
* This function enables PKI
*
* @param node Node to enable PKI.
*/
void cvmx_pki_enable(int node)
{
cvmx_pki_sft_rst_t sft_rst;
cvmx_pki_buf_ctl_t buf_ctl;
sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
while (sft_rst.s.busy != 0)
sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
if (buf_ctl.s.pki_en)
debug("Warning: Enabling PKI when PKI already enabled.\n");
buf_ctl.s.pki_en = 1;
csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
}
/**
* This function sets the clusters in PKI.
*
* @param node Node to set clusters.
*/
int cvmx_pki_setup_clusters(int node)
{
int i;
for (i = 0; i < cvmx_pki_cluster_code_length; i++)
csr_wr_node(node, CVMX_PKI_IMEMX(i),
cvmx_pki_cluster_code_default[i]);
return 0;
}
/**
* This function reads global configuration of PKI block.
*
* @param node Node number.
* @param gbl_cfg Pointer to struct to read global configuration.
*/
void cvmx_pki_read_global_config(int node,
struct cvmx_pki_global_config *gbl_cfg)
{
cvmx_pki_stat_ctl_t stat_ctl;
cvmx_pki_icgx_cfg_t icg_cfg;
cvmx_pki_gbl_pen_t gbl_pen;
cvmx_pki_tag_secret_t tag_secret;
cvmx_pki_frm_len_chkx_t frm_len_chk;
cvmx_pki_buf_ctl_t buf_ctl;
unsigned int cl_grp;
int id;
stat_ctl.u64 = csr_rd_node(node, CVMX_PKI_STAT_CTL);
gbl_cfg->stat_mode = stat_ctl.s.mode;
for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++) {
icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(cl_grp));
gbl_cfg->cluster_mask[cl_grp] = icg_cfg.s.clusters;
}
gbl_pen.u64 = csr_rd_node(node, CVMX_PKI_GBL_PEN);
gbl_cfg->gbl_pen.virt_pen = gbl_pen.s.virt_pen;
gbl_cfg->gbl_pen.clg_pen = gbl_pen.s.clg_pen;
gbl_cfg->gbl_pen.cl2_pen = gbl_pen.s.cl2_pen;
gbl_cfg->gbl_pen.l4_pen = gbl_pen.s.l4_pen;
gbl_cfg->gbl_pen.il3_pen = gbl_pen.s.il3_pen;
gbl_cfg->gbl_pen.l3_pen = gbl_pen.s.l3_pen;
gbl_cfg->gbl_pen.mpls_pen = gbl_pen.s.mpls_pen;
gbl_cfg->gbl_pen.fulc_pen = gbl_pen.s.fulc_pen;
gbl_cfg->gbl_pen.dsa_pen = gbl_pen.s.dsa_pen;
gbl_cfg->gbl_pen.hg_pen = gbl_pen.s.hg_pen;
tag_secret.u64 = csr_rd_node(node, CVMX_PKI_TAG_SECRET);
gbl_cfg->tag_secret.dst6 = tag_secret.s.dst6;
gbl_cfg->tag_secret.src6 = tag_secret.s.src6;
gbl_cfg->tag_secret.dst = tag_secret.s.dst;
gbl_cfg->tag_secret.src = tag_secret.s.src;
for (id = 0; id < CVMX_PKI_NUM_FRAME_CHECK; id++) {
frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
gbl_cfg->frm_len[id].maxlen = frm_len_chk.s.maxlen;
gbl_cfg->frm_len[id].minlen = frm_len_chk.s.minlen;
}
buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
gbl_cfg->fpa_wait = buf_ctl.s.fpa_wait;
}
/**
 * This function writes max and min frame lengths to hardware, which can be used
 * to check the size of an arriving frame. There are 2 possible combinations,
 * selected by the id field.
*
* @param node Node number.
* @param id Choose which frame len register to write to
* @param len_chk Struct containing byte count for max-sized/min-sized frame check.
*/
static void cvmx_pki_write_frame_len(int node, int id,
struct cvmx_pki_frame_len len_chk)
{
cvmx_pki_frm_len_chkx_t frm_len_chk;
frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
frm_len_chk.s.maxlen = len_chk.maxlen;
frm_len_chk.s.minlen = len_chk.minlen;
csr_wr_node(node, CVMX_PKI_FRM_LEN_CHKX(id), frm_len_chk.u64);
}
/**
* This function writes global configuration of PKI into hw.
*
* @param node Node number.
* @param gbl_cfg Pointer to struct to global configuration.
*/
void cvmx_pki_write_global_config(int node,
struct cvmx_pki_global_config *gbl_cfg)
{
cvmx_pki_stat_ctl_t stat_ctl;
cvmx_pki_buf_ctl_t buf_ctl;
unsigned int cl_grp;
for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++)
cvmx_pki_attach_cluster_to_group(node, cl_grp,
gbl_cfg->cluster_mask[cl_grp]);
stat_ctl.u64 = 0;
stat_ctl.s.mode = gbl_cfg->stat_mode;
csr_wr_node(node, CVMX_PKI_STAT_CTL, stat_ctl.u64);
buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
buf_ctl.s.fpa_wait = gbl_cfg->fpa_wait;
csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
cvmx_pki_write_global_parse(node, gbl_cfg->gbl_pen);
cvmx_pki_write_tag_secret(node, gbl_cfg->tag_secret);
cvmx_pki_write_frame_len(node, 0, gbl_cfg->frm_len[0]);
cvmx_pki_write_frame_len(node, 1, gbl_cfg->frm_len[1]);
}
/**
* This function reads per pkind parameters in hardware which defines how
* the incoming packet is processed.
*
* @param node Node number.
* @param pkind PKI supports a large number of incoming interfaces and packets
* arriving on different interfaces or channels may want to be processed
* differently. PKI uses the pkind to determine how the incoming packet
* is processed.
 * @param pkind_cfg Pointer to struct containing the pkind configuration read
* from the hardware.
*/
int cvmx_pki_read_pkind_config(int node, int pkind,
struct cvmx_pki_pkind_config *pkind_cfg)
{
int cluster = 0;
u64 cl_mask;
cvmx_pki_pkindx_icgsel_t icgsel;
cvmx_pki_clx_pkindx_style_t pstyle;
cvmx_pki_icgx_cfg_t icg_cfg;
cvmx_pki_clx_pkindx_cfg_t pcfg;
cvmx_pki_clx_pkindx_skip_t skip;
cvmx_pki_clx_pkindx_l2_custom_t l2cust;
cvmx_pki_clx_pkindx_lg_custom_t lgcust;
icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(icgsel.s.icg));
pkind_cfg->cluster_grp = (uint8_t)icgsel.s.icg;
cl_mask = (uint64_t)icg_cfg.s.clusters;
cluster = __builtin_ffsll(cl_mask) - 1;
pstyle.u64 =
csr_rd_node(node, CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
pkind_cfg->initial_parse_mode = pstyle.s.pm;
pkind_cfg->initial_style = pstyle.s.style;
pcfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
pkind_cfg->fcs_pres = pcfg.s.fcs_pres;
pkind_cfg->parse_en.inst_hdr = pcfg.s.inst_hdr;
pkind_cfg->parse_en.mpls_en = pcfg.s.mpls_en;
pkind_cfg->parse_en.lg_custom = pcfg.s.lg_custom;
pkind_cfg->parse_en.fulc_en = pcfg.s.fulc_en;
pkind_cfg->parse_en.dsa_en = pcfg.s.dsa_en;
pkind_cfg->parse_en.hg2_en = pcfg.s.hg2_en;
pkind_cfg->parse_en.hg_en = pcfg.s.hg_en;
skip.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
pkind_cfg->fcs_skip = skip.s.fcs_skip;
pkind_cfg->inst_skip = skip.s.inst_skip;
l2cust.u64 = csr_rd_node(node,
CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
pkind_cfg->l2_scan_offset = l2cust.s.offset;
lgcust.u64 = csr_rd_node(node,
CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
pkind_cfg->lg_scan_offset = lgcust.s.offset;
return 0;
}
/**
* This function writes per pkind parameters in hardware which defines how
* the incoming packet is processed.
*
* @param node Node number.
* @param pkind PKI supports a large number of incoming interfaces and packets
* arriving on different interfaces or channels may want to be processed
* differently. PKI uses the pkind to determine how the incoming
* packet is processed.
 * @param pkind_cfg Pointer to struct containing the pkind configuration that
 * needs to be written to the hardware.
*/
int cvmx_pki_write_pkind_config(int node, int pkind,
struct cvmx_pki_pkind_config *pkind_cfg)
{
unsigned int cluster = 0;
u64 cluster_mask;
cvmx_pki_pkindx_icgsel_t icgsel;
cvmx_pki_clx_pkindx_style_t pstyle;
cvmx_pki_icgx_cfg_t icg_cfg;
cvmx_pki_clx_pkindx_cfg_t pcfg;
cvmx_pki_clx_pkindx_skip_t skip;
cvmx_pki_clx_pkindx_l2_custom_t l2cust;
cvmx_pki_clx_pkindx_lg_custom_t lgcust;
if (pkind >= CVMX_PKI_NUM_PKIND ||
pkind_cfg->cluster_grp >= CVMX_PKI_NUM_CLUSTER_GROUP ||
pkind_cfg->initial_style >= CVMX_PKI_NUM_FINAL_STYLE) {
debug("ERROR: Configuring PKIND pkind = %d cluster_group = %d style = %d\n",
pkind, pkind_cfg->cluster_grp, pkind_cfg->initial_style);
return -1;
}
icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
icgsel.s.icg = pkind_cfg->cluster_grp;
csr_wr_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind), icgsel.u64);
icg_cfg.u64 =
csr_rd_node(node, CVMX_PKI_ICGX_CFG(pkind_cfg->cluster_grp));
cluster_mask = (uint64_t)icg_cfg.s.clusters;
while (cluster < CVMX_PKI_NUM_CLUSTER) {
if (cluster_mask & (0x01L << cluster)) {
pstyle.u64 = csr_rd_node(
node,
CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
pstyle.s.pm = pkind_cfg->initial_parse_mode;
pstyle.s.style = pkind_cfg->initial_style;
csr_wr_node(node,
CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster),
pstyle.u64);
pcfg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
pcfg.s.fcs_pres = pkind_cfg->fcs_pres;
pcfg.s.inst_hdr = pkind_cfg->parse_en.inst_hdr;
pcfg.s.mpls_en = pkind_cfg->parse_en.mpls_en;
pcfg.s.lg_custom = pkind_cfg->parse_en.lg_custom;
pcfg.s.fulc_en = pkind_cfg->parse_en.fulc_en;
pcfg.s.dsa_en = pkind_cfg->parse_en.dsa_en;
pcfg.s.hg2_en = pkind_cfg->parse_en.hg2_en;
pcfg.s.hg_en = pkind_cfg->parse_en.hg_en;
csr_wr_node(node,
CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster),
pcfg.u64);
skip.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
skip.s.fcs_skip = pkind_cfg->fcs_skip;
skip.s.inst_skip = pkind_cfg->inst_skip;
csr_wr_node(node,
CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster),
skip.u64);
l2cust.u64 = csr_rd_node(
node,
CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
l2cust.s.offset = pkind_cfg->l2_scan_offset;
csr_wr_node(node,
CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind,
cluster),
l2cust.u64);
lgcust.u64 = csr_rd_node(
node,
CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
lgcust.s.offset = pkind_cfg->lg_scan_offset;
csr_wr_node(node,
CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind,
cluster),
lgcust.u64);
}
cluster++;
}
return 0;
}
/**
* This function reads parameters associated with tag configuration in hardware.
* Only first cluster in the group is used.
*
* @param node Node number.
* @param style Style to configure tag for.
* @param cluster_mask Mask of clusters to configure the style for.
* @param tag_cfg Pointer to tag configuration struct.
*/
void cvmx_pki_read_tag_config(int node, int style, uint64_t cluster_mask,
struct cvmx_pki_style_tag_cfg *tag_cfg)
{
int mask, tag_idx, index;
cvmx_pki_clx_stylex_cfg2_t style_cfg2;
cvmx_pki_clx_stylex_alg_t style_alg;
cvmx_pki_stylex_tag_sel_t tag_sel;
cvmx_pki_tag_incx_ctl_t tag_ctl;
cvmx_pki_tag_incx_mask_t tag_mask;
int cluster = __builtin_ffsll(cluster_mask) - 1;
style_cfg2.u64 =
csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
style_alg.u64 =
csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
/* 7-Tuple Tag: */
tag_cfg->tag_fields.layer_g_src = style_cfg2.s.tag_src_lg;
tag_cfg->tag_fields.layer_f_src = style_cfg2.s.tag_src_lf;
tag_cfg->tag_fields.layer_e_src = style_cfg2.s.tag_src_le;
tag_cfg->tag_fields.layer_d_src = style_cfg2.s.tag_src_ld;
tag_cfg->tag_fields.layer_c_src = style_cfg2.s.tag_src_lc;
tag_cfg->tag_fields.layer_b_src = style_cfg2.s.tag_src_lb;
tag_cfg->tag_fields.layer_g_dst = style_cfg2.s.tag_dst_lg;
tag_cfg->tag_fields.layer_f_dst = style_cfg2.s.tag_dst_lf;
tag_cfg->tag_fields.layer_e_dst = style_cfg2.s.tag_dst_le;
tag_cfg->tag_fields.layer_d_dst = style_cfg2.s.tag_dst_ld;
tag_cfg->tag_fields.layer_c_dst = style_cfg2.s.tag_dst_lc;
tag_cfg->tag_fields.layer_b_dst = style_cfg2.s.tag_dst_lb;
tag_cfg->tag_fields.tag_vni = style_alg.s.tag_vni;
tag_cfg->tag_fields.tag_gtp = style_alg.s.tag_gtp;
tag_cfg->tag_fields.tag_spi = style_alg.s.tag_spi;
tag_cfg->tag_fields.tag_sync = style_alg.s.tag_syn;
tag_cfg->tag_fields.ip_prot_nexthdr = style_alg.s.tag_pctl;
tag_cfg->tag_fields.second_vlan = style_alg.s.tag_vs1;
tag_cfg->tag_fields.first_vlan = style_alg.s.tag_vs0;
tag_cfg->tag_fields.mpls_label = style_alg.s.tag_mpls0;
tag_cfg->tag_fields.input_port = style_alg.s.tag_prt;
/* Custom-Mask Tag: */
tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
for (mask = 0; mask < 4; mask++) {
tag_cfg->mask_tag[mask].enable =
(style_cfg2.s.tag_inc & (1 << mask)) != 0;
switch (mask) {
case 0:
tag_idx = tag_sel.s.tag_idx0;
break;
case 1:
tag_idx = tag_sel.s.tag_idx1;
break;
case 2:
tag_idx = tag_sel.s.tag_idx2;
break;
case 3:
tag_idx = tag_sel.s.tag_idx3;
break;
}
index = tag_idx * 4 + mask;
tag_mask.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_MASK(index));
tag_cfg->mask_tag[mask].val = tag_mask.s.en;
tag_ctl.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_CTL(index));
tag_cfg->mask_tag[mask].base = tag_ctl.s.ptr_sel;
tag_cfg->mask_tag[mask].offset = tag_ctl.s.offset;
}
}
/**
* This function writes/configures parameters associated with tag configuration in
* hardware. In Custom-Mask Tagging, all four masks use the same base index
* to access Tag Control and Tag Mask registers.
*
* @param node Node number.
* @param style Style to configure tag for.
* @param cluster_mask Mask of clusters to configure the style for.
 * @param tag_cfg Pointer to tag configuration struct.
*/
void cvmx_pki_write_tag_config(int node, int style, uint64_t cluster_mask,
struct cvmx_pki_style_tag_cfg *tag_cfg)
{
int mask, index, tag_idx, mtag_en = 0;
unsigned int cluster = 0;
cvmx_pki_clx_stylex_cfg2_t scfg2;
cvmx_pki_clx_stylex_alg_t style_alg;
cvmx_pki_tag_incx_ctl_t tag_ctl;
cvmx_pki_tag_incx_mask_t tag_mask;
cvmx_pki_stylex_tag_sel_t tag_sel;
while (cluster < CVMX_PKI_NUM_CLUSTER) {
if (cluster_mask & (0x01L << cluster)) {
/* 7-Tuple Tag: */
scfg2.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
scfg2.s.tag_src_lg = tag_cfg->tag_fields.layer_g_src;
scfg2.s.tag_src_lf = tag_cfg->tag_fields.layer_f_src;
scfg2.s.tag_src_le = tag_cfg->tag_fields.layer_e_src;
scfg2.s.tag_src_ld = tag_cfg->tag_fields.layer_d_src;
scfg2.s.tag_src_lc = tag_cfg->tag_fields.layer_c_src;
scfg2.s.tag_src_lb = tag_cfg->tag_fields.layer_b_src;
scfg2.s.tag_dst_lg = tag_cfg->tag_fields.layer_g_dst;
scfg2.s.tag_dst_lf = tag_cfg->tag_fields.layer_f_dst;
scfg2.s.tag_dst_le = tag_cfg->tag_fields.layer_e_dst;
scfg2.s.tag_dst_ld = tag_cfg->tag_fields.layer_d_dst;
scfg2.s.tag_dst_lc = tag_cfg->tag_fields.layer_c_dst;
scfg2.s.tag_dst_lb = tag_cfg->tag_fields.layer_b_dst;
csr_wr_node(node,
CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
scfg2.u64);
style_alg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
style_alg.s.tag_vni = tag_cfg->tag_fields.tag_vni;
style_alg.s.tag_gtp = tag_cfg->tag_fields.tag_gtp;
style_alg.s.tag_spi = tag_cfg->tag_fields.tag_spi;
style_alg.s.tag_syn = tag_cfg->tag_fields.tag_sync;
style_alg.s.tag_pctl =
tag_cfg->tag_fields.ip_prot_nexthdr;
style_alg.s.tag_vs1 = tag_cfg->tag_fields.second_vlan;
style_alg.s.tag_vs0 = tag_cfg->tag_fields.first_vlan;
style_alg.s.tag_mpls0 = tag_cfg->tag_fields.mpls_label;
style_alg.s.tag_prt = tag_cfg->tag_fields.input_port;
csr_wr_node(node,
CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
style_alg.u64);
/* Custom-Mask Tag (Part 1): */
for (mask = 0; mask < 4; mask++) {
if (tag_cfg->mask_tag[mask].enable)
mtag_en++;
}
if (mtag_en) {
scfg2.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_CFG2(
style, cluster));
scfg2.s.tag_inc = 0;
for (mask = 0; mask < 4; mask++) {
if (tag_cfg->mask_tag[mask].enable)
scfg2.s.tag_inc |= 1 << mask;
}
csr_wr_node(node,
CVMX_PKI_CLX_STYLEX_CFG2(style,
cluster),
scfg2.u64);
}
}
cluster++;
}
/* Custom-Mask Tag (Part 2): */
if (mtag_en) {
tag_idx = cvmx_pki_mtag_idx_alloc(node, -1);
if (tag_idx < 0)
return;
tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
for (mask = 0; mask < 4; mask++) {
if (tag_cfg->mask_tag[mask].enable) {
switch (mask) {
case 0:
tag_sel.s.tag_idx0 = tag_idx;
break;
case 1:
tag_sel.s.tag_idx1 = tag_idx;
break;
case 2:
tag_sel.s.tag_idx2 = tag_idx;
break;
case 3:
tag_sel.s.tag_idx3 = tag_idx;
break;
}
index = tag_idx * 4 + mask;
tag_mask.u64 = csr_rd_node(
node, CVMX_PKI_TAG_INCX_MASK(index));
tag_mask.s.en = tag_cfg->mask_tag[mask].val;
csr_wr_node(node, CVMX_PKI_TAG_INCX_MASK(index),
tag_mask.u64);
tag_ctl.u64 = csr_rd_node(
node, CVMX_PKI_TAG_INCX_CTL(index));
tag_ctl.s.ptr_sel =
tag_cfg->mask_tag[mask].base;
tag_ctl.s.offset =
tag_cfg->mask_tag[mask].offset;
csr_wr_node(node, CVMX_PKI_TAG_INCX_CTL(index),
tag_ctl.u64);
}
}
csr_wr_node(node, CVMX_PKI_STYLEX_TAG_SEL(style), tag_sel.u64);
}
}
/**
* This function reads parameters associated with style in hardware.
*
* @param node Node number.
* @param style Style to read from.
* @param cluster_mask Mask of clusters style belongs to.
* @param style_cfg Pointer to style config struct.
*/
void cvmx_pki_read_style_config(int node, int style, uint64_t cluster_mask,
struct cvmx_pki_style_config *style_cfg)
{
cvmx_pki_clx_stylex_cfg_t scfg;
cvmx_pki_clx_stylex_cfg2_t scfg2;
cvmx_pki_clx_stylex_alg_t style_alg;
cvmx_pki_stylex_buf_t style_buf;
int cluster = __builtin_ffsll(cluster_mask) - 1;
scfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
scfg2.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
style_alg.u64 =
csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
style_cfg->parm_cfg.ip6_udp_opt = scfg.s.ip6_udp_opt;
style_cfg->parm_cfg.lenerr_en = scfg.s.lenerr_en;
style_cfg->parm_cfg.lenerr_eqpad = scfg.s.lenerr_eqpad;
style_cfg->parm_cfg.maxerr_en = scfg.s.maxerr_en;
style_cfg->parm_cfg.minerr_en = scfg.s.minerr_en;
style_cfg->parm_cfg.fcs_chk = scfg.s.fcs_chk;
style_cfg->parm_cfg.fcs_strip = scfg.s.fcs_strip;
style_cfg->parm_cfg.minmax_sel = scfg.s.minmax_sel;
style_cfg->parm_cfg.qpg_base = scfg.s.qpg_base;
style_cfg->parm_cfg.qpg_dis_padd = scfg.s.qpg_dis_padd;
style_cfg->parm_cfg.qpg_dis_aura = scfg.s.qpg_dis_aura;
style_cfg->parm_cfg.qpg_dis_grp = scfg.s.qpg_dis_grp;
style_cfg->parm_cfg.qpg_dis_grptag = scfg.s.qpg_dis_grptag;
style_cfg->parm_cfg.rawdrp = scfg.s.rawdrp;
style_cfg->parm_cfg.force_drop = scfg.s.drop;
style_cfg->parm_cfg.nodrop = scfg.s.nodrop;
style_cfg->parm_cfg.len_lg = scfg2.s.len_lg;
style_cfg->parm_cfg.len_lf = scfg2.s.len_lf;
style_cfg->parm_cfg.len_le = scfg2.s.len_le;
style_cfg->parm_cfg.len_ld = scfg2.s.len_ld;
style_cfg->parm_cfg.len_lc = scfg2.s.len_lc;
style_cfg->parm_cfg.len_lb = scfg2.s.len_lb;
style_cfg->parm_cfg.csum_lg = scfg2.s.csum_lg;
style_cfg->parm_cfg.csum_lf = scfg2.s.csum_lf;
style_cfg->parm_cfg.csum_le = scfg2.s.csum_le;
style_cfg->parm_cfg.csum_ld = scfg2.s.csum_ld;
style_cfg->parm_cfg.csum_lc = scfg2.s.csum_lc;
style_cfg->parm_cfg.csum_lb = scfg2.s.csum_lb;
style_cfg->parm_cfg.qpg_qos = style_alg.s.qpg_qos;
style_cfg->parm_cfg.tag_type = style_alg.s.tt;
style_cfg->parm_cfg.apad_nip = style_alg.s.apad_nip;
style_cfg->parm_cfg.qpg_port_sh = style_alg.s.qpg_port_sh;
style_cfg->parm_cfg.qpg_port_msb = style_alg.s.qpg_port_msb;
style_cfg->parm_cfg.wqe_vs = style_alg.s.wqe_vs;
style_cfg->parm_cfg.pkt_lend = style_buf.s.pkt_lend;
style_cfg->parm_cfg.wqe_hsz = style_buf.s.wqe_hsz;
style_cfg->parm_cfg.wqe_skip = style_buf.s.wqe_skip * 128;
style_cfg->parm_cfg.first_skip = style_buf.s.first_skip * 8;
style_cfg->parm_cfg.later_skip = style_buf.s.later_skip * 8;
style_cfg->parm_cfg.cache_mode = style_buf.s.opc_mode;
style_cfg->parm_cfg.mbuff_size = style_buf.s.mb_size * 8;
style_cfg->parm_cfg.dis_wq_dat = style_buf.s.dis_wq_dat;
cvmx_pki_read_tag_config(node, style, cluster_mask,
&style_cfg->tag_cfg);
}
/**
* This function writes/configures parameters associated with style in hardware.
*
* @param node Node number.
* @param style Style to configure.
* @param cluster_mask Mask of clusters to configure the style for.
* @param style_cfg Pointer to style config struct.
*/
void cvmx_pki_write_style_config(int node, uint64_t style, u64 cluster_mask,
struct cvmx_pki_style_config *style_cfg)
{
cvmx_pki_clx_stylex_cfg_t scfg;
cvmx_pki_clx_stylex_cfg2_t scfg2;
cvmx_pki_clx_stylex_alg_t style_alg;
cvmx_pki_stylex_buf_t style_buf;
unsigned int cluster = 0;
while (cluster < CVMX_PKI_NUM_CLUSTER) {
if (cluster_mask & (0x01L << cluster)) {
scfg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
scfg.s.ip6_udp_opt = style_cfg->parm_cfg.ip6_udp_opt;
scfg.s.lenerr_en = style_cfg->parm_cfg.lenerr_en;
scfg.s.lenerr_eqpad = style_cfg->parm_cfg.lenerr_eqpad;
scfg.s.maxerr_en = style_cfg->parm_cfg.maxerr_en;
scfg.s.minerr_en = style_cfg->parm_cfg.minerr_en;
scfg.s.fcs_chk = style_cfg->parm_cfg.fcs_chk;
scfg.s.fcs_strip = style_cfg->parm_cfg.fcs_strip;
scfg.s.minmax_sel = style_cfg->parm_cfg.minmax_sel;
scfg.s.qpg_base = style_cfg->parm_cfg.qpg_base;
scfg.s.qpg_dis_padd = style_cfg->parm_cfg.qpg_dis_padd;
scfg.s.qpg_dis_aura = style_cfg->parm_cfg.qpg_dis_aura;
scfg.s.qpg_dis_grp = style_cfg->parm_cfg.qpg_dis_grp;
scfg.s.qpg_dis_grptag =
style_cfg->parm_cfg.qpg_dis_grptag;
scfg.s.rawdrp = style_cfg->parm_cfg.rawdrp;
scfg.s.drop = style_cfg->parm_cfg.force_drop;
scfg.s.nodrop = style_cfg->parm_cfg.nodrop;
csr_wr_node(node,
CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
scfg.u64);
scfg2.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
scfg2.s.len_lg = style_cfg->parm_cfg.len_lg;
scfg2.s.len_lf = style_cfg->parm_cfg.len_lf;
scfg2.s.len_le = style_cfg->parm_cfg.len_le;
scfg2.s.len_ld = style_cfg->parm_cfg.len_ld;
scfg2.s.len_lc = style_cfg->parm_cfg.len_lc;
scfg2.s.len_lb = style_cfg->parm_cfg.len_lb;
scfg2.s.csum_lg = style_cfg->parm_cfg.csum_lg;
scfg2.s.csum_lf = style_cfg->parm_cfg.csum_lf;
scfg2.s.csum_le = style_cfg->parm_cfg.csum_le;
scfg2.s.csum_ld = style_cfg->parm_cfg.csum_ld;
scfg2.s.csum_lc = style_cfg->parm_cfg.csum_lc;
scfg2.s.csum_lb = style_cfg->parm_cfg.csum_lb;
csr_wr_node(node,
CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
scfg2.u64);
style_alg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
style_alg.s.qpg_qos = style_cfg->parm_cfg.qpg_qos;
style_alg.s.tt = style_cfg->parm_cfg.tag_type;
style_alg.s.apad_nip = style_cfg->parm_cfg.apad_nip;
style_alg.s.qpg_port_sh =
style_cfg->parm_cfg.qpg_port_sh;
style_alg.s.qpg_port_msb =
style_cfg->parm_cfg.qpg_port_msb;
style_alg.s.wqe_vs = style_cfg->parm_cfg.wqe_vs;
csr_wr_node(node,
CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
style_alg.u64);
}
cluster++;
}
style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
style_buf.s.pkt_lend = style_cfg->parm_cfg.pkt_lend;
style_buf.s.wqe_hsz = style_cfg->parm_cfg.wqe_hsz;
style_buf.s.wqe_skip = (style_cfg->parm_cfg.wqe_skip) / 128;
style_buf.s.first_skip = (style_cfg->parm_cfg.first_skip) / 8;
style_buf.s.later_skip = style_cfg->parm_cfg.later_skip / 8;
style_buf.s.opc_mode = style_cfg->parm_cfg.cache_mode;
style_buf.s.mb_size = (style_cfg->parm_cfg.mbuff_size) / 8;
style_buf.s.dis_wq_dat = style_cfg->parm_cfg.dis_wq_dat;
csr_wr_node(node, CVMX_PKI_STYLEX_BUF(style), style_buf.u64);
cvmx_pki_write_tag_config(node, style, cluster_mask,
&style_cfg->tag_cfg);
}
/**
* This function reads qpg entry at specified offset from qpg table.
*
* @param node Node number.
* @param offset Offset in qpg table to read from.
* @param qpg_cfg Pointer to structure containing qpg values.
*/
int cvmx_pki_read_qpg_entry(int node, int offset,
struct cvmx_pki_qpg_config *qpg_cfg)
{
cvmx_pki_qpg_tblx_t qpg_tbl;
if (offset >= CVMX_PKI_NUM_QPG_ENTRY) {
debug("ERROR: qpg offset %d is >= 2048\n", offset);
return -1;
}
qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
qpg_cfg->aura_num = qpg_tbl.s.laura;
qpg_cfg->port_add = qpg_tbl.s.padd;
qpg_cfg->grp_ok = qpg_tbl.s.grp_ok;
qpg_cfg->grp_bad = qpg_tbl.s.grp_bad;
qpg_cfg->grptag_ok = qpg_tbl.s.grptag_ok;
qpg_cfg->grptag_bad = qpg_tbl.s.grptag_bad;
return 0;
}
/**
* This function writes qpg entry at specified offset in qpg table.
*
* @param node Node number.
* @param offset Offset in qpg table to read from.
* @param qpg_cfg Pointer to structure containing qpg values.
*/
void cvmx_pki_write_qpg_entry(int node, int offset,
struct cvmx_pki_qpg_config *qpg_cfg)
{
cvmx_pki_qpg_tblx_t qpg_tbl;
qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
qpg_tbl.s.padd = qpg_cfg->port_add;
qpg_tbl.s.laura = qpg_cfg->aura_num;
qpg_tbl.s.grp_ok = qpg_cfg->grp_ok;
qpg_tbl.s.grp_bad = qpg_cfg->grp_bad;
qpg_tbl.s.grptag_ok = qpg_cfg->grptag_ok;
qpg_tbl.s.grptag_bad = qpg_cfg->grptag_bad;
csr_wr_node(node, CVMX_PKI_QPG_TBLX(offset), qpg_tbl.u64);
}
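/*
 * Illustrative read-modify-write of a QPG entry (editor's example, not part
 * of the original sources); the offset and aura number are placeholders:
 *
 *	struct cvmx_pki_qpg_config qpg;
 *
 *	if (cvmx_pki_read_qpg_entry(0, 16, &qpg) == 0) {
 *		qpg.aura_num = 4;
 *		cvmx_pki_write_qpg_entry(0, 16, &qpg);
 *	}
 */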
/**
* This function writes pcam entry at given offset in pcam table in hardware
*
* @param node Node number.
* @param index Offset in pcam table.
* @param cluster_mask Mask of clusters in which to write pcam entry.
* @param input Input keys to pcam match passed as struct.
* @param action PCAM match action passed as struct.
*/
int cvmx_pki_pcam_write_entry(int node, int index, uint64_t cluster_mask,
struct cvmx_pki_pcam_input input,
struct cvmx_pki_pcam_action action)
{
int bank;
unsigned int cluster = 0;
cvmx_pki_clx_pcamx_termx_t term;
cvmx_pki_clx_pcamx_matchx_t match;
cvmx_pki_clx_pcamx_actionx_t act;
if (index >= CVMX_PKI_TOTAL_PCAM_ENTRY) {
debug("\nERROR: Invalid pcam entry %d\n", index);
return -1;
}
bank = (int)(input.field & 0x01);
while (cluster < CVMX_PKI_NUM_CLUSTER) {
if (cluster_mask & (0x01L << cluster)) {
term.u64 = csr_rd_node(
node,
CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
term.s.valid = 0;
csr_wr_node(node,
CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
index),
term.u64);
match.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
index));
match.s.data1 = input.data & input.data_mask;
match.s.data0 = (~input.data) & input.data_mask;
csr_wr_node(node,
CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
index),
match.u64);
act.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
index));
act.s.pmc = action.parse_mode_chg;
act.s.style_add = action.style_add;
act.s.pf = action.parse_flag_set;
act.s.setty = action.layer_type_set;
act.s.advance = action.pointer_advance;
csr_wr_node(node,
CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
index),
act.u64);
term.u64 = csr_rd_node(
node,
CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
term.s.term1 = input.field & input.field_mask;
term.s.term0 = (~input.field) & input.field_mask;
term.s.style1 = input.style & input.style_mask;
term.s.style0 = (~input.style) & input.style_mask;
term.s.valid = 1;
csr_wr_node(node,
CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
index),
term.u64);
}
cluster++;
}
return 0;
}
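/*
 * Illustrative usage (editor's sketch, not part of the original sources;
 * the match/action values are placeholders and the structs are assumed to
 * be declared in cvmx-pki.h): allocate a PCAM entry in bank 0 for four
 * clusters, fill in the input and action structs, then write the entry:
 *
 *	struct cvmx_pki_pcam_input input = { 0 };
 *	struct cvmx_pki_pcam_action action = { 0 };
 *	u64 cl_mask = 0xf;
 *	int idx = cvmx_pki_pcam_entry_alloc(0, -1, 0, cl_mask);
 *
 *	if (idx >= 0)
 *		cvmx_pki_pcam_write_entry(0, idx, cl_mask, input, action);
 */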
/**
* Enables/Disables fcs check and fcs stripping on the pkind.
*
* @param node Node number
* @param pknd PKIND to apply settings on.
* @param fcs_chk Enable/disable fcs check.
* 1 = enable fcs error check.
* 0 = disable fcs error check.
* @param fcs_strip Strip L2 FCS bytes from packet, decrease WQE[LEN] by 4 bytes
* 1 = strip L2 FCS.
* 0 = Do not strip L2 FCS.
*/
void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip)
{
int style;
unsigned int cluster;
cvmx_pki_clx_pkindx_style_t pstyle;
cvmx_pki_clx_stylex_cfg_t style_cfg;
	/* Validate PKIND # */
if (pknd >= CVMX_PKI_NUM_PKIND) {
printf("%s: PKIND %d out of range\n", __func__, pknd);
return;
}
for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
pstyle.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
style = pstyle.s.style;
/* Validate STYLE # */
if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
continue;
style_cfg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
style_cfg.s.fcs_chk = fcs_chk;
style_cfg.s.fcs_strip = fcs_strip;
csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
style_cfg.u64);
}
}
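/*
 * Illustrative call (editor's example, not part of the original sources):
 * enable FCS checking and FCS stripping on pkind 0 of node 0:
 *
 *	cvmx_pki_endis_fcs_check(0, 0, true, true);
 */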
/**
* Enables/Disables l2 length error check and max & min frame length checks
*
* @param node Node number
* @param pknd PKIND to disable error for.
* @param l2len_err L2 length error check enable.
* @param maxframe_err Max frame error check enable.
* @param minframe_err Min frame error check enable.
 * 1 = Enable error checks
* 0 = Disable error checks
*/
void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err,
bool maxframe_err, bool minframe_err)
{
int style;
unsigned int cluster;
cvmx_pki_clx_pkindx_style_t pstyle;
cvmx_pki_clx_stylex_cfg_t style_cfg;
	/* Validate PKIND # */
if (pknd >= CVMX_PKI_NUM_PKIND) {
printf("%s: PKIND %d out of range\n", __func__, pknd);
return;
}
for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
pstyle.u64 = csr_rd_node(
node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
style = pstyle.s.style;
/* Validate STYLE # */
if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
continue;
style_cfg.u64 = csr_rd_node(
node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
style_cfg.s.lenerr_en = l2len_err;
style_cfg.s.maxerr_en = maxframe_err;
style_cfg.s.minerr_en = minframe_err;
csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
style_cfg.u64);
}
}

View file

@ -0,0 +1,99 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
union interface_port {
struct {
int port;
int interface;
} s;
u64 u64;
};
static int dbg;
static int port_range_init;
int __cvmx_pko_internal_ports_range_init(void)
{
int rv = 0;
if (port_range_init)
return 0;
port_range_init = 1;
rv = cvmx_create_global_resource_range(CVMX_GR_TAG_PKO_IPORTS,
CVMX_HELPER_CFG_MAX_PKO_QUEUES);
if (rv != 0)
debug("ERROR : Failed to initialize pko internal port range\n");
return rv;
}
int cvmx_pko_internal_ports_alloc(int xiface, int port, u64 count)
{
int ret_val = -1;
union interface_port inf_port;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
__cvmx_pko_internal_ports_range_init();
inf_port.s.interface = xi.interface;
inf_port.s.port = port;
ret_val = cvmx_allocate_global_resource_range(CVMX_GR_TAG_PKO_IPORTS,
inf_port.u64, count, 1);
if (dbg)
debug("internal port alloc : port=%02d base=%02d count=%02d\n",
(int)port, ret_val, (int)count);
if (ret_val == -1)
return ret_val;
cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_port_base = ret_val;
cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_num_ports = count;
return 0;
}

View file

@ -0,0 +1,788 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Support library for the hardware Packet Output unit.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-iob-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-helper-pko.h>
DECLARE_GLOBAL_DATA_PTR;
#define CVMX_PKO_NQ_PER_PORT_MAX 32
static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
int base_queue,
int num_queues,
const u8 priority[]);
static const int debug;
/**
* Internal state of packet output
*/
/*
* PKO port iterator
* XXX this macro only works for 68XX
*/
#define pko_for_each_port(__p) \
for (__p = 0; __p < CVMX_HELPER_CFG_MAX_PKO_PORT; __p++) \
if (__cvmx_helper_cfg_pko_queue_base(__p) != \
CVMX_HELPER_CFG_INVALID_VALUE)
/*
* @INTERNAL
*
* Get INT for a port
*
* @param interface
* @param index
* @return the INT value on success and -1 on error
*
* This function is only for CN68XX.
*/
static int __cvmx_pko_int(int interface, int index)
{
cvmx_helper_cfg_assert(interface < CVMX_HELPER_MAX_IFACE);
cvmx_helper_cfg_assert(index >= 0);
switch (interface) {
case 0:
cvmx_helper_cfg_assert(index < 4);
return index;
case 1:
cvmx_helper_cfg_assert(index == 0);
return 4;
case 2:
cvmx_helper_cfg_assert(index < 4);
return index + 8;
case 3:
cvmx_helper_cfg_assert(index < 4);
return index + 0xC;
case 4:
cvmx_helper_cfg_assert(index < 4);
return index + 0x10;
case 5:
cvmx_helper_cfg_assert(index < 256);
return 0x1C;
case 6:
cvmx_helper_cfg_assert(index < 256);
return 0x1D;
case 7:
cvmx_helper_cfg_assert(index < 32);
return 0x1E;
case 8:
cvmx_helper_cfg_assert(index < 8);
return 0x1F;
}
return -1;
}
int cvmx_pko_get_base_pko_port(int interface, int index)
{
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
return cvmx_helper_get_ipd_port(interface, index);
else if (octeon_has_feature(OCTEON_FEATURE_PKND))
return __cvmx_helper_cfg_pko_port_base(interface, index);
else
return cvmx_helper_get_ipd_port(interface, index);
}
int cvmx_pko_get_base_queue(int port)
{
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
return cvmx_pko3_get_queue_base(port);
} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
return __cvmx_helper_cfg_pko_queue_base(
cvmx_helper_cfg_ipd2pko_port_base(port));
} else {
if (port < 48)
return cvmx_pko_queue_table[port].ccppp_queue_base;
else
return CVMX_PKO_ILLEGAL_QUEUE;
}
}
int cvmx_pko_get_num_queues(int port)
{
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
return cvmx_pko3_get_queue_num(port);
} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
return __cvmx_helper_cfg_pko_queue_num(
cvmx_helper_cfg_ipd2pko_port_base(port));
} else {
if (port < 48)
return cvmx_pko_queue_table[port].ccppp_num_queues;
}
return 0;
}
/*
* Allocate memory for PKO engines.
*
* @param engine is the PKO engine ID.
* @return # of 2KB-chunks allocated to this PKO engine.
*/
static int __cvmx_pko_memory_per_engine_o68(int engine)
{
	/* CN68XX has 40KB to divide between the engines in 2KB chunks */
int max_engine;
int size_per_engine;
int size;
max_engine = __cvmx_helper_cfg_pko_max_engine();
size_per_engine = 40 / 2 / max_engine;
if (engine >= max_engine)
/* Unused engines get no space */
size = 0;
else if (engine == max_engine - 1)
/*
* The last engine gets all the space lost by rounding. This means
* the ILK gets the most space
*/
size = 40 / 2 - engine * size_per_engine;
else
/* All other engines get the same space */
size = size_per_engine;
return size;
}
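/*
 * Worked example (editor's note, not part of the original sources): with
 * 12 engines in use, size_per_engine = 40 / 2 / 12 = 1 chunk, engines 0-10
 * get 1 chunk each, and engine 11 gets 40 / 2 - 11 * 1 = 9 chunks, i.e. the
 * rounding remainder all goes to the last (ILK) engine.
 */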
/*
* Setup one-to-one mapping between PKO2 iport and eport.
* @INTERNAL
*/
static void __cvmx_pko2_chip_init(void)
{
int i;
int interface, index, port;
cvmx_helper_interface_mode_t mode;
union cvmx_pko_mem_iport_ptrs config;
/*
* Initialize every iport with the invalid eid.
*/
#define CVMX_O68_PKO2_INVALID_EID 31
config.u64 = 0;
config.s.eid = CVMX_O68_PKO2_INVALID_EID;
for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
config.s.ipid = i;
csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
}
/*
* Set up PKO_MEM_IPORT_PTRS
*/
pko_for_each_port(port) {
interface = __cvmx_helper_cfg_pko_port_interface(port);
index = __cvmx_helper_cfg_pko_port_index(port);
mode = cvmx_helper_interface_get_mode(interface);
if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
continue;
config.s.ipid = port;
config.s.qos_mask = 0xff;
config.s.crc = __cvmx_helper_get_has_fcs(interface);
config.s.min_pkt = __cvmx_helper_get_pko_padding(interface);
config.s.intr = __cvmx_pko_int(interface, index);
config.s.eid = __cvmx_helper_cfg_pko_port_eid(port);
config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
index :
port;
csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
}
}
int __cvmx_pko_get_pipe(int interface, int index)
{
/* The loopback ports do not have pipes */
if (cvmx_helper_interface_get_mode(interface) ==
CVMX_HELPER_INTERFACE_MODE_LOOP)
return -1;
/* We use pko_port as the pipe. See __cvmx_pko_port_map_o68(). */
return cvmx_helper_get_pko_port(interface, index);
}
static void __cvmx_pko1_chip_init(void)
{
int queue;
union cvmx_pko_mem_queue_ptrs config;
union cvmx_pko_reg_queue_ptrs1 config1;
const int port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
/* Initialize all queues to connect to port 63 (ILLEGAL_PID) */
for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
config1.u64 = 0;
config1.s.idx3 = 0;
config1.s.qid7 = queue >> 7;
config.u64 = 0;
config.s.tail = 1;
config.s.index = 0;
config.s.port = port;
config.s.queue = queue;
config.s.buf_ptr = 0;
csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
}
}
/**
* Call before any other calls to initialize the packet
* output system. This does chip global config, and should only be
* done by one core.
*/
void cvmx_pko_hw_init(u8 pool, unsigned int bufsize)
{
union cvmx_pko_reg_cmd_buf config;
union cvmx_iob_fau_timeout fau_to;
int i;
if (debug)
debug("%s: pool=%u bufsz=%u\n", __func__, pool, bufsize);
/* chip-specific setup. */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
__cvmx_pko2_chip_init();
else
__cvmx_pko1_chip_init();
/*
* Set the size of the PKO command buffers to an odd number of
* 64bit words. This allows the normal two word send to stay
* aligned and never span a command word buffer.
*/
config.u64 = 0;
config.s.pool = pool;
config.s.size = bufsize / 8 - 1;
csr_wr(CVMX_PKO_REG_CMD_BUF, config.u64);
/*
* Disable tagwait FAU timeout. This needs to be done before
* anyone might start packet output using tags.
*/
fau_to.u64 = 0;
fau_to.s.tout_val = 0xfff;
fau_to.s.tout_enb = 0;
csr_wr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
union cvmx_pko_reg_min_pkt min_pkt;
min_pkt.u64 = 0;
min_pkt.s.size1 = 59;
min_pkt.s.size2 = 59;
min_pkt.s.size3 = 59;
min_pkt.s.size4 = 59;
min_pkt.s.size5 = 59;
min_pkt.s.size6 = 59;
min_pkt.s.size7 = 59;
csr_wr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
}
/*
* If we aren't using all of the queues optimize PKO's
* internal memory.
*/
if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
int max_queues = __cvmx_helper_cfg_pko_max_queue();
if (OCTEON_IS_MODEL(OCTEON_CN68XX) && max_queues <= 32)
csr_wr(CVMX_PKO_REG_QUEUE_MODE, 3);
else if (max_queues <= 64)
csr_wr(CVMX_PKO_REG_QUEUE_MODE, 2);
else if (max_queues <= 128)
csr_wr(CVMX_PKO_REG_QUEUE_MODE, 1);
else
csr_wr(CVMX_PKO_REG_QUEUE_MODE, 0);
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
for (i = 0; i < 2; i++) {
union cvmx_pko_reg_engine_storagex
engine_storage;
#define PKO_ASSIGN_ENGINE_STORAGE(index) \
engine_storage.s.engine##index = \
__cvmx_pko_memory_per_engine_o68(16 * i + (index))
engine_storage.u64 = 0;
PKO_ASSIGN_ENGINE_STORAGE(0);
PKO_ASSIGN_ENGINE_STORAGE(1);
PKO_ASSIGN_ENGINE_STORAGE(2);
PKO_ASSIGN_ENGINE_STORAGE(3);
PKO_ASSIGN_ENGINE_STORAGE(4);
PKO_ASSIGN_ENGINE_STORAGE(5);
PKO_ASSIGN_ENGINE_STORAGE(6);
PKO_ASSIGN_ENGINE_STORAGE(7);
PKO_ASSIGN_ENGINE_STORAGE(8);
PKO_ASSIGN_ENGINE_STORAGE(9);
PKO_ASSIGN_ENGINE_STORAGE(10);
PKO_ASSIGN_ENGINE_STORAGE(11);
PKO_ASSIGN_ENGINE_STORAGE(12);
PKO_ASSIGN_ENGINE_STORAGE(13);
PKO_ASSIGN_ENGINE_STORAGE(14);
PKO_ASSIGN_ENGINE_STORAGE(15);
csr_wr(CVMX_PKO_REG_ENGINE_STORAGEX(i),
engine_storage.u64);
}
}
}
}
/**
* Enables the packet output hardware. It must already be
* configured.
*/
void cvmx_pko_enable(void)
{
union cvmx_pko_reg_flags flags;
flags.u64 = csr_rd(CVMX_PKO_REG_FLAGS);
if (flags.s.ena_pko)
debug("Warning: Enabling PKO when PKO already enabled.\n");
flags.s.ena_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
flags.s.ena_pko = 1;
/*
* always enable big endian for 3-word command. Does nothing
* for 2-word.
*/
flags.s.store_be = 1;
csr_wr(CVMX_PKO_REG_FLAGS, flags.u64);
}
/**
* Configure a output port and the associated queues for use.
*
* @param port Port to configure.
* @param base_queue First queue number to associate with this port.
* @param num_queues Number of queues to associate with this port
* @param priority Array of priority levels for each queue. Values are
 * allowed to be 0-8. A value of 8 gets 8 times the traffic
 * of a value of 1. A value of 0 indicates that the queue does not
 * participate in any rounds. These priorities can be changed
* on the fly while the pko is enabled. A priority of 9
* indicates that static priority should be used. If static
* priority is used all queues with static priority must be
* contiguous starting at the base_queue, and lower numbered
* queues have higher priority than higher numbered queues.
* There must be num_queues elements in the array.
*/
cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue,
int num_queues,
const u8 priority[])
{
cvmx_pko_return_value_t result_code;
int queue;
union cvmx_pko_mem_queue_ptrs config;
union cvmx_pko_reg_queue_ptrs1 config1;
int static_priority_base = -1;
int static_priority_end = -1;
int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();
/* This function is not used for CN68XX */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return cvmx_pko2_config_port(port, base_queue, num_queues,
priority);
if (debug)
debug("%s: port=%d queue=%d-%d pri %#x %#x %#x %#x\n", __func__,
port, base_queue, (base_queue + num_queues - 1),
priority[0], priority[1], priority[2], priority[3]);
/* The need to handle ILLEGAL_PID port argument
	 * is obsolete now; the code here can be simplified.
*/
if (port >= CVMX_PKO_NUM_OUTPUT_PORTS &&
port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
debug("ERROR: %s: Invalid port %llu\n", __func__,
(unsigned long long)port);
return CVMX_PKO_INVALID_PORT;
}
if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
debug("ERROR: %s: Invalid queue range port = %lld base=%llu numques=%lld\n",
__func__, (unsigned long long)port,
(unsigned long long)base_queue,
(unsigned long long)num_queues);
return CVMX_PKO_INVALID_QUEUE;
}
if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
/*
* Validate the static queue priority setup and set
* static_priority_base and static_priority_end
* accordingly.
*/
for (queue = 0; queue < num_queues; queue++) {
/* Find first queue of static priority */
int p_queue = queue % 16;
if (static_priority_base == -1 &&
priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
static_priority_base = queue;
/* Find last queue of static priority */
if (static_priority_base != -1 &&
static_priority_end == -1 &&
priority[p_queue] !=
CVMX_PKO_QUEUE_STATIC_PRIORITY &&
queue)
static_priority_end = queue - 1;
else if (static_priority_base != -1 &&
static_priority_end == -1 &&
queue == num_queues - 1)
				/* all queues are static priority */
static_priority_end = queue;
/*
* Check to make sure all static priority
* queues are contiguous. Also catches some
			 * cases of static priorities not starting at
* queue 0.
*/
if (static_priority_end != -1 &&
(int)queue > static_priority_end &&
priority[p_queue] ==
CVMX_PKO_QUEUE_STATIC_PRIORITY) {
debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
__func__, (int)queue, static_priority_end);
return CVMX_PKO_INVALID_PRIORITY;
}
}
if (static_priority_base > 0) {
debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
__func__, static_priority_base);
return CVMX_PKO_INVALID_PRIORITY;
}
}
/*
* At this point, static_priority_base and static_priority_end
* are either both -1, or are valid start/end queue numbers
*/
result_code = CVMX_PKO_SUCCESS;
for (queue = 0; queue < num_queues; queue++) {
u64 *buf_ptr = NULL;
int p_queue = queue % 16;
config1.u64 = 0;
config1.s.idx3 = queue >> 3;
config1.s.qid7 = (base_queue + queue) >> 7;
config.u64 = 0;
config.s.tail = queue == (num_queues - 1);
config.s.index = queue;
config.s.port = port;
config.s.queue = base_queue + queue;
config.s.static_p = static_priority_base >= 0;
config.s.static_q = (int)queue <= static_priority_end;
config.s.s_tail = (int)queue == static_priority_end;
/*
* Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packets don't
* get grouped up.
*/
switch ((int)priority[p_queue]) {
case 0:
config.s.qos_mask = 0x00;
break;
case 1:
config.s.qos_mask = 0x01;
break;
case 2:
config.s.qos_mask = 0x11;
break;
case 3:
config.s.qos_mask = 0x49;
break;
case 4:
config.s.qos_mask = 0x55;
break;
case 5:
config.s.qos_mask = 0x57;
break;
case 6:
config.s.qos_mask = 0x77;
break;
case 7:
config.s.qos_mask = 0x7f;
break;
case 8:
config.s.qos_mask = 0xff;
break;
case CVMX_PKO_QUEUE_STATIC_PRIORITY:
config.s.qos_mask = 0xff;
break;
default:
debug("ERROR: %s: Invalid priority %llu\n", __func__,
(unsigned long long)priority[p_queue]);
config.s.qos_mask = 0xff;
result_code = CVMX_PKO_INVALID_PRIORITY;
break;
}
if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
cvmx_cmd_queue_result_t cmd_res;
cmd_res = cvmx_cmd_queue_initialize(
CVMX_CMD_QUEUE_PKO(base_queue + queue),
CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
outputbuffer_pool_size -
CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST *
8);
if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
switch (cmd_res) {
case CVMX_CMD_QUEUE_NO_MEMORY:
debug("ERROR: %s: Unable to allocate output buffer\n",
__func__);
return CVMX_PKO_NO_MEMORY;
case CVMX_CMD_QUEUE_ALREADY_SETUP:
debug("ERROR: %s: Port already setup. port=%d\n",
__func__, (int)port);
return CVMX_PKO_PORT_ALREADY_SETUP;
case CVMX_CMD_QUEUE_INVALID_PARAM:
default:
debug("ERROR: %s: Command queue initialization failed.\n",
__func__);
return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
}
}
buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
CVMX_CMD_QUEUE_PKO(base_queue + queue));
config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
} else {
config.s.buf_ptr = 0;
}
CVMX_SYNCWS;
csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
}
return result_code;
}
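/*
 * Illustrative call (editor's example, not part of the original sources):
 * attach four equal-weight queues, starting at queue 0, to port 0:
 *
 *	const u8 prio[4] = { 1, 1, 1, 1 };
 *
 *	cvmx_pko_config_port(0, 0, 4, prio);
 */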
/*
* Configure queues for an internal port.
* @INTERNAL
* @param pko_port PKO internal port number
* @note this is the PKO2 equivalent to cvmx_pko_config_port()
*/
static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
int base_queue,
int num_queues,
const u8 priority[])
{
int queue, pko_port;
int static_priority_base;
int static_priority_end;
union cvmx_pko_mem_iqueue_ptrs config;
u64 *buf_ptr = NULL;
int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();
pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);
if (debug)
debug("%s: ipd_port %d pko_iport %d qbase %d qnum %d\n",
__func__, ipd_port, pko_port, base_queue, num_queues);
static_priority_base = -1;
static_priority_end = -1;
/*
* static queue priority validation
*/
for (queue = 0; queue < num_queues; queue++) {
int p_queue = queue % 16;
if (static_priority_base == -1 &&
priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
static_priority_base = queue;
if (static_priority_base != -1 && static_priority_end == -1 &&
priority[p_queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY &&
queue)
static_priority_end = queue - 1;
else if (static_priority_base != -1 &&
static_priority_end == -1 && queue == num_queues - 1)
static_priority_end =
queue; /* all queues are static priority */
/*
* Check to make sure all static priority queues are contiguous.
		 * Also catches some cases of static priorities not starting from
* queue 0.
*/
if (static_priority_end != -1 &&
(int)queue > static_priority_end &&
priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY) {
debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
__func__, (int)queue, static_priority_end);
}
if (static_priority_base > 0) {
debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
__func__, static_priority_base);
}
}
/*
* main loop to set the fields of CVMX_PKO_MEM_IQUEUE_PTRS for
* each queue
*/
for (queue = 0; queue < num_queues; queue++) {
int p_queue = queue % 8;
config.u64 = 0;
config.s.index = queue;
config.s.qid = base_queue + queue;
config.s.ipid = pko_port;
config.s.tail = (queue == (num_queues - 1));
config.s.s_tail = (queue == static_priority_end);
config.s.static_p = (static_priority_base >= 0);
config.s.static_q = (queue <= static_priority_end);
/*
* Convert the priority into an enable bit field.
		 * Try to space the bits out evenly so the packets
		 * don't get grouped up.
*/
switch ((int)priority[p_queue]) {
case 0:
config.s.qos_mask = 0x00;
break;
case 1:
config.s.qos_mask = 0x01;
break;
case 2:
config.s.qos_mask = 0x11;
break;
case 3:
config.s.qos_mask = 0x49;
break;
case 4:
config.s.qos_mask = 0x55;
break;
case 5:
config.s.qos_mask = 0x57;
break;
case 6:
config.s.qos_mask = 0x77;
break;
case 7:
config.s.qos_mask = 0x7f;
break;
case 8:
config.s.qos_mask = 0xff;
break;
case CVMX_PKO_QUEUE_STATIC_PRIORITY:
config.s.qos_mask = 0xff;
break;
default:
debug("ERROR: %s: Invalid priority %llu\n", __func__,
(unsigned long long)priority[p_queue]);
config.s.qos_mask = 0xff;
break;
}
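/*
* Illustrative note (not in the original source): the mask spreads the
* enabled slots evenly across the 8-bit round-robin window, e.g.
* priority 4 maps to 0x55 (every other slot enabled) and priority 8
* maps to 0xff (all slots enabled).
*/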
/*
* The command queues
*/
{
cvmx_cmd_queue_result_t cmd_res;
cmd_res = cvmx_cmd_queue_initialize(
CVMX_CMD_QUEUE_PKO(base_queue + queue),
CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
(outputbuffer_pool_size -
CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
switch (cmd_res) {
case CVMX_CMD_QUEUE_NO_MEMORY:
debug("ERROR: %s: Unable to allocate output buffer\n",
__func__);
break;
case CVMX_CMD_QUEUE_ALREADY_SETUP:
debug("ERROR: %s: Port already setup\n",
__func__);
break;
case CVMX_CMD_QUEUE_INVALID_PARAM:
default:
debug("ERROR: %s: Command queue initialization failed.",
__func__);
break;
}
debug(" pko_port%d base_queue%d num_queues%d queue%d.\n",
pko_port, base_queue, num_queues, queue);
}
buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
CVMX_CMD_QUEUE_PKO(base_queue + queue));
config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
}
CVMX_SYNCWS;
csr_wr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
}
/* Error detection is desirable here */
return 0;
}

View file

@ -0,0 +1,656 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-scratch.h>
#include <mach/cvmx-hwfau.h>
#include <mach/cvmx-fau.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
/* #undef CVMX_ENABLE_PARAMETER_CHECKING */
/* #define CVMX_ENABLE_PARAMETER_CHECKING 1 */
/* #define __PKO3_NATIVE_PTR */
static inline u64 cvmx_pko3_legacy_paddr(unsigned int node, u64 addr)
{
u64 paddr;
paddr = node;
paddr = (addr & ((1ull << 40) - 1)) | (paddr << 40);
return paddr;
}
#if CVMX_ENABLE_PARAMETER_CHECKING
/**
* @INTERNAL
*
* Verify the integrity of a legacy buffer link pointer.
*
* Note that the IPD/PIP/PKO hardware may sometimes
* round up the buf_ptr->size field of the last buffer in a chain to the next
* cache line size, so the sum of buf_ptr->size
* fields for a packet may exceed total_bytes by up to 127 bytes.
*
* @returns 0 on success, a negative number on error.
*/
static int cvmx_pko3_legacy_bufptr_validate(cvmx_buf_ptr_t buf_ptr,
unsigned int gather,
unsigned int buffers,
unsigned int total_bytes)
{
unsigned int node = cvmx_get_node_num();
unsigned int segs = 0, bytes = 0;
unsigned int phys_addr;
cvmx_buf_ptr_t ptr;
int delta;
if (buffers == 0) {
return -1;
} else if (buffers == 1) {
delta = buf_ptr.s.size - total_bytes;
if (delta < 0 || delta > 127)
return -2;
} else if (gather) {
cvmx_buf_ptr_t *vptr;
/* Validate gather list */
if (buf_ptr.s.size < buffers)
return -3;
phys_addr = cvmx_pko3_legacy_paddr(node, buf_ptr.s.addr);
vptr = cvmx_phys_to_ptr(phys_addr);
for (segs = 0; segs < buffers; segs++)
bytes += vptr[segs].s.size;
delta = bytes - total_bytes;
if (delta < 0 || delta > 127)
return -4;
} else {
void *vptr;
/* Validate linked buffers */
ptr = buf_ptr;
for (segs = 0; segs < buffers; segs++) {
bytes += ptr.s.size;
phys_addr = cvmx_pko3_legacy_paddr(node, ptr.s.addr);
vptr = cvmx_phys_to_ptr(phys_addr);
memcpy(&ptr, vptr - sizeof(u64), sizeof(u64));
}
delta = bytes - total_bytes;
if (delta < 0 || delta > 127)
return -5;
}
return 0;
}
#endif /* CVMX_ENABLE_PARAMETER_CHECKING */
/*
* @INTERNAL
*
* Implementation note:
* When the packet is certain not to need a jump_buf,
* it is written directly into cvmseg.
* When the packet might not fit into cvmseg with all
* of its descriptors, a jump_buf is allocated a priori;
* only the header is placed into cvmseg, all other
* descriptors are placed into the jump_buf, and finally
* the PKO_SEND_JUMP_S is written to cvmseg.
* This is because if there are no EXT or TSO descriptors,
* then HDR must be first, JMP second, and that is all
* that should go into cvmseg.
struct __cvmx_pko3_legacy_desc {
u64 *cmd_words;
u64 *jump_buf_base_ptr;
unsigned short word_count;
short last_pool;
u8 port_node;
u8 aura_node;
u8 jump_buf_size;
};
/**
* @INTERNAL
*
* Add a subdescriptor into a command buffer,
* and handle command-buffer overflow by allocating a JUMP_s buffer
* from PKO3 internal AURA.
*/
static int __cvmx_pko3_cmd_subdc_add(struct __cvmx_pko3_legacy_desc *desc,
u64 subdc)
{
/* SEND_JUMP_S missing on Pass1.X */
if (desc->word_count >= 15) {
printf("%s: ERROR: too many segments\n", __func__);
return -EBADF;
}
/* Handle small commands simply */
if (cvmx_likely(!desc->jump_buf_base_ptr)) {
desc->cmd_words[desc->word_count] = subdc;
(desc->word_count)++;
return desc->word_count;
}
if (cvmx_unlikely(desc->jump_buf_size >= 255))
return -ENOMEM;
desc->jump_buf_base_ptr[desc->jump_buf_size++] = subdc;
return desc->word_count + desc->jump_buf_size;
}
/**
* @INTERNAL
*
* Finalize command buffer
*
* @returns: number of command words in command buffer and jump buffer
* or negative number on error.
*/
static int __cvmx_pko3_cmd_done(struct __cvmx_pko3_legacy_desc *desc)
{
short pko_aura;
cvmx_pko_buf_ptr_t jump_s;
cvmx_pko_send_aura_t aura_s;
/* no jump buffer, nothing to do */
if (!desc->jump_buf_base_ptr)
return desc->word_count;
desc->word_count++;
/* With a jump buffer only HDR_S and JUMP_S go into cvmseg, so word_count must be 2 */
if (desc->word_count != 2) {
printf("ERROR: %s: internal error, word_count=%d\n", __func__,
desc->word_count);
return -EINVAL;
}
/* Add SEND_AURA_S at the end of jump_buf */
pko_aura = __cvmx_pko3_aura_get(desc->port_node);
aura_s.u64 = 0;
aura_s.s.aura = pko_aura;
aura_s.s.offset = 0;
aura_s.s.alg = AURAALG_NOP;
aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
desc->jump_buf_base_ptr[desc->jump_buf_size++] = aura_s.u64;
/* Add SEND_JUMPS to point to jump_buf */
jump_s.u64 = 0;
jump_s.s.subdc3 = CVMX_PKO_SENDSUBDC_JUMP;
jump_s.s.addr = cvmx_ptr_to_phys(desc->jump_buf_base_ptr);
jump_s.s.i = 1; /* F=1: Free this buffer when done */
jump_s.s.size = desc->jump_buf_size;
desc->cmd_words[1] = jump_s.u64;
return desc->word_count + desc->jump_buf_size;
}
/**
* @INTERNAL
*
* Handle buffer pools for PKO legacy transmit operation
*/
static inline int cvmx_pko3_legacy_pool(struct __cvmx_pko3_legacy_desc *desc,
int pool)
{
cvmx_pko_send_aura_t aura_s;
unsigned int aura;
if (cvmx_unlikely(desc->last_pool == pool))
return 0;
aura = desc->aura_node << 10; /* LAURA=AURA[0..9] */
aura |= pool;
if (cvmx_likely(desc->last_pool < 0)) {
cvmx_pko_send_hdr_t *hdr_s;
hdr_s = (void *)&desc->cmd_words[0];
/* Create AURA from legacy pool (assume LAURA == POOL) */
hdr_s->s.aura = aura;
desc->last_pool = pool;
return 0;
}
aura_s.u64 = 0;
aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
aura_s.s.offset = 0;
aura_s.s.alg = AURAALG_NOP;
aura |= pool;
aura_s.s.aura = aura;
desc->last_pool = pool;
return __cvmx_pko3_cmd_subdc_add(desc, aura_s.u64);
}
/**
* @INTERNAL
*
* Backward compatibility for packet transmission using legacy PKO command.
*
* NOTE: Only supports output on node-local ports.
*
* TBD: Could embed destination node in extended DQ number.
*/
cvmx_pko_return_value_t
cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
cvmx_buf_ptr_t packet, u64 addr, bool tag_sw)
{
cvmx_pko_query_rtn_t pko_status;
cvmx_pko_send_hdr_t *hdr_s;
struct __cvmx_pko3_legacy_desc desc;
u8 *data_ptr;
unsigned int node, seg_cnt;
int res;
cvmx_buf_ptr_pki_t bptr;
seg_cnt = pko_command.s.segs;
desc.cmd_words = cvmx_pko3_cvmseg_addr();
/* Allocate from local aura, assume all old-pools are local */
node = cvmx_get_node_num();
desc.aura_node = node;
/* Derive destination node from dq */
desc.port_node = dq >> 10;
dq &= (1 << 10) - 1;
desc.word_count = 1;
desc.last_pool = -1;
/* For small packets, write descriptors directly to CVMSEG
* but for longer packets use jump_buf
*/
if (seg_cnt < 7 || OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
desc.jump_buf_size = 0;
desc.jump_buf_base_ptr = NULL;
} else {
unsigned int pko_aura = __cvmx_pko3_aura_get(desc.port_node);
cvmx_fpa3_gaura_t aura =
__cvmx_fpa3_gaura(pko_aura >> 10, pko_aura & 0x3ff);
/* Allocate from internal AURA, size is 4KiB */
desc.jump_buf_base_ptr = cvmx_fpa3_alloc(aura);
if (!desc.jump_buf_base_ptr)
return -ENOMEM;
desc.jump_buf_size = 0;
}
/* Native buffer-pointer for error checking */
bptr.u64 = packet.u64;
#if CVMX_ENABLE_PARAMETER_CHECKING
if (seg_cnt == 1 && bptr.size == pko_command.s.total_bytes) {
/*
* Special case for native buffer pointer:
* This is the only case where the native pointer-style can be
* automatically identified, that is when the PKI has placed an
* entire packet into a single buffer.
* The use of the native buffers with this function
* should be avoided.
*/
debug("%s: WARNING: Native buffer-pointer\n", __func__);
} else {
/* The buffer ptr is assumed to be received in legacy format */
res = cvmx_pko3_legacy_bufptr_validate(
packet, pko_command.s.gather, pko_command.s.segs,
pko_command.s.total_bytes);
if (res < 0) {
debug("%s: ERROR: Not a valid packet pointer <%d>\n",
__func__, res);
return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
}
}
#endif /* CVMX_ENABLE_PARAMETER_CHECKING */
/* Squash warnings */
(void)bptr;
/*** Translate legacy PKO fields into PKO3 PKO_SEND_HDR_S ***/
/* PKO_SEND_HDR_S is always the first word in the command */
hdr_s = (void *)&desc.cmd_words[0];
hdr_s->u64 = 0;
/* Copy total packet size */
hdr_s->s.total = pko_command.s.total_bytes;
/* Endianness */
hdr_s->s.le = pko_command.s.le;
/* N2 is the same meaning */
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
hdr_s->s.n2 = 0; /* L2 allocate everything */
else
hdr_s->s.n2 = pko_command.s.n2;
/* DF bit has the same meaning */
hdr_s->s.df = pko_command.s.dontfree;
/* II bit has the same meaning */
hdr_s->s.ii = pko_command.s.ignore_i;
/* non-zero IP header offset requires L3/L4 checksum calculation */
if (cvmx_unlikely(pko_command.s.ipoffp1 > 0)) {
u8 ipoff, ip0, l4_proto = 0;
/* Get data pointer for header inspection below */
if (pko_command.s.gather) {
cvmx_buf_ptr_t *p_ptr;
cvmx_buf_ptr_t blk;
p_ptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, packet.s.addr));
blk = p_ptr[0];
data_ptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, blk.s.addr));
} else {
data_ptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, packet.s.addr));
}
/* Get IP header offset */
ipoff = pko_command.s.ipoffp1 - 1;
/* Parse IP header, version, L4 protocol */
hdr_s->s.l3ptr = ipoff;
ip0 = data_ptr[ipoff];
/* IPv4 header length, checksum offload */
if ((ip0 >> 4) == 4) {
hdr_s->s.l4ptr = hdr_s->s.l3ptr + ((ip0 & 0xf) << 2);
l4_proto = data_ptr[ipoff + 9];
hdr_s->s.ckl3 = 1; /* Only valid for IPv4 */
}
/* IPv6 header length is fixed, no checksum */
if ((ip0 >> 4) == 6) {
hdr_s->s.l4ptr = hdr_s->s.l3ptr + 40;
l4_proto = data_ptr[ipoff + 6];
}
/* Set L4 checksum algo based on L4 protocol */
if (l4_proto == 6)
hdr_s->s.ckl4 = /* TCP */ 2;
else if (l4_proto == 17)
hdr_s->s.ckl4 = /* UDP */ 1;
else if (l4_proto == 132)
hdr_s->s.ckl4 = /* SCTP */ 3;
else
hdr_s->s.ckl4 = /* Unknown */ 0;
}
if (pko_command.s.gather) {
/* Process legacy gather list */
cvmx_pko_buf_ptr_t gather_s;
cvmx_buf_ptr_t *p_ptr;
cvmx_buf_ptr_t blk;
unsigned int i;
/* Get gather list pointer */
p_ptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, packet.s.addr));
blk = p_ptr[0];
/* setup data_ptr */
data_ptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, blk.s.addr));
for (i = 0; i < seg_cnt; i++) {
if (cvmx_unlikely(cvmx_pko3_legacy_pool(
&desc, blk.s.pool) < 0))
return CVMX_PKO_NO_MEMORY;
/* Insert PKO_SEND_GATHER_S for the current buffer */
gather_s.u64 = 0;
gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
gather_s.s.size = blk.s.size;
gather_s.s.i = blk.s.i;
gather_s.s.addr =
cvmx_pko3_legacy_paddr(node, blk.s.addr);
res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
if (res < 0)
return CVMX_PKO_NO_MEMORY;
/* get next bufptr */
blk = p_ptr[i + 1];
} /* for i */
/* Free original gather-list buffer */
if ((pko_command.s.ignore_i && !pko_command.s.dontfree) ||
packet.s.i == pko_command.s.dontfree)
cvmx_fpa_free_nosync(p_ptr, packet.s.pool,
(i - 1) / 16 + 1);
} else {
/* Process legacy linked buffer list */
cvmx_pko_buf_ptr_t gather_s;
cvmx_buf_ptr_t blk;
void *vptr;
data_ptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, packet.s.addr));
blk = packet;
/*
* Legacy linked-buffers converted into flat gather list
* so that the AURA can optionally be changed to reflect
* the POOL number in the legacy pointers
*/
do {
/* Insert PKO_SEND_AURA_S if pool changes */
if (cvmx_unlikely(cvmx_pko3_legacy_pool(
&desc, blk.s.pool) < 0))
return CVMX_PKO_NO_MEMORY;
/* Insert PKO_SEND_GATHER_S for the current buffer */
gather_s.u64 = 0;
gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
gather_s.s.size = blk.s.size;
gather_s.s.i = blk.s.i;
gather_s.s.addr =
cvmx_pko3_legacy_paddr(node, blk.s.addr);
res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
if (res < 0)
return CVMX_PKO_NO_MEMORY;
/* Get the next buffer pointer */
vptr = cvmx_phys_to_ptr(
cvmx_pko3_legacy_paddr(node, blk.s.addr));
memcpy(&blk, vptr - sizeof(blk), sizeof(blk));
/* Decrement segment count */
seg_cnt--;
} while (seg_cnt > 0);
}
/* This field indicates the presence of 3rd legacy command word */
/* NOTE: legacy 3rd word may contain CN78XX native phys addr already */
if (cvmx_unlikely(pko_command.s.rsp)) {
/* PTP bit in word3 is not supported -
* cannot be distinguished from larger phys_addr[42..41]
*/
if (pko_command.s.wqp) {
/* <addr> is an SSO WQE */
cvmx_wqe_word1_t *wqe_p;
cvmx_pko_send_work_t work_s;
work_s.u64 = 0;
work_s.s.subdc4 = CVMX_PKO_SENDSUBDC_WORK;
work_s.s.addr = addr;
/* Assume WQE is legacy format too */
wqe_p = cvmx_phys_to_ptr(addr + sizeof(u64));
work_s.s.grp = wqe_p->cn38xx.grp;
work_s.s.tt = wqe_p->tag_type;
res = __cvmx_pko3_cmd_subdc_add(&desc, work_s.u64);
} else {
cvmx_pko_send_mem_t mem_s;
/* MEMALG_SET broken on Pass1 */
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
debug("%s: ERROR: PKO byte-clear not supported\n",
__func__);
}
/* <addr> is a physical address of byte clear */
mem_s.u64 = 0;
mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
mem_s.s.addr = addr;
mem_s.s.dsz = MEMDSZ_B8;
mem_s.s.alg = MEMALG_SET;
mem_s.s.offset = 0;
res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
}
if (res < 0)
return CVMX_PKO_NO_MEMORY;
}
/* FAU counter binding reg0 */
if (pko_command.s.reg0) {
cvmx_pko_send_mem_t mem_s;
debug("%s: Legacy FAU commands: reg0=%#x sz0=%#x\n", __func__,
pko_command.s.reg0, pko_command.s.size0);
mem_s.u64 = 0;
mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
mem_s.s.addr = cvmx_ptr_to_phys(
CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg0)));
if (cvmx_likely(pko_command.s.size0 == CVMX_FAU_OP_SIZE_64))
mem_s.s.dsz = MEMDSZ_B64;
else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_32)
mem_s.s.dsz = MEMDSZ_B32;
else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_16)
mem_s.s.dsz = MEMDSZ_B16;
else
mem_s.s.dsz = MEMDSZ_B8;
if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
debug("%s: ERROR: 8/16 bit decrement unsupported",
__func__);
mem_s.s.offset = pko_command.s.subone0;
if (mem_s.s.offset)
mem_s.s.alg = MEMALG_SUB;
else
mem_s.s.alg = MEMALG_SUBLEN;
res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
if (res < 0)
return CVMX_PKO_NO_MEMORY;
}
/* FAU counter binding reg1 */
if (cvmx_unlikely(pko_command.s.reg1)) {
cvmx_pko_send_mem_t mem_s;
debug("%s: Legacy FAU commands: reg1=%#x sz1=%#x\n", __func__,
pko_command.s.reg1, pko_command.s.size1);
mem_s.u64 = 0;
mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
mem_s.s.addr = cvmx_ptr_to_phys(
CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg1)));
if (cvmx_likely(pko_command.s.size1 == CVMX_FAU_OP_SIZE_64))
mem_s.s.dsz = MEMDSZ_B64;
else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_32)
mem_s.s.dsz = MEMDSZ_B32;
else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_16)
mem_s.s.dsz = MEMDSZ_B16;
else
mem_s.s.dsz = MEMDSZ_B8;
if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
printf("%s: ERROR: 8/16 bit decrement unsupported",
__func__);
mem_s.s.offset = pko_command.s.subone1;
if (mem_s.s.offset)
mem_s.s.alg = MEMALG_SUB;
else
mem_s.s.alg = MEMALG_SUBLEN;
res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
if (res < 0)
return CVMX_PKO_NO_MEMORY;
}
/* These PKO_HDR_S fields are not used: */
/* hdr_s->s.ds does not have legacy equivalent, remains 0 */
/* hdr_s->s.format has no legacy equivalent, remains 0 */
/*** Finalize command buffer ***/
res = __cvmx_pko3_cmd_done(&desc);
if (res < 0)
return CVMX_PKO_NO_MEMORY;
/*** Send the PKO3 command into the Descriptor Queue ***/
pko_status =
__cvmx_pko3_lmtdma(desc.port_node, dq, desc.word_count, tag_sw);
/*** Map PKO3 result codes to legacy return values ***/
if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
return CVMX_PKO_SUCCESS;
debug("%s: ERROR: failed to enqueue: %s\n", __func__,
pko_dqstatus_error(pko_status.s.dqstatus));
if (pko_status.s.dqstatus == PKO_DQSTATUS_ALREADY)
return CVMX_PKO_PORT_ALREADY_SETUP;
if (pko_status.s.dqstatus == PKO_DQSTATUS_NOFPABUF ||
pko_status.s.dqstatus == PKO_DQSTATUS_NOPKOBUF)
return CVMX_PKO_NO_MEMORY;
if (pko_status.s.dqstatus == PKO_DQSTATUS_NOTCREATED)
return CVMX_PKO_INVALID_QUEUE;
if (pko_status.s.dqstatus == PKO_DQSTATUS_BADSTATE)
return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
if (pko_status.s.dqstatus == PKO_DQSTATUS_SENDPKTDROP)
return CVMX_PKO_INVALID_PORT;
return CVMX_PKO_INVALID_PORT;
}
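/*
* Example usage (illustrative sketch, not part of the original code;
* identifiers such as pkt_data, pkt_len and packet_pool are
* hypothetical placeholders):
*
* cvmx_pko_command_word0_t cmd;
* cvmx_buf_ptr_t pkt;
* int dq = cvmx_pko3_get_queue_base(ipd_port);
*
* cmd.u64 = 0;
* cmd.s.segs = 1;
* cmd.s.total_bytes = pkt_len;
* pkt.u64 = 0;
* pkt.s.addr = cvmx_ptr_to_phys(pkt_data);
* pkt.s.size = pkt_len;
* pkt.s.pool = packet_pool;
*
* if (cvmx_pko3_legacy_xmit(dq, cmd, pkt, 0, false) != CVMX_PKO_SUCCESS)
* debug("transmit failed\n");
*/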

View file

@ -0,0 +1,879 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
/* Smallest Round-Robin quantum to use +1 */
#define CVMX_PKO3_RR_QUANTUM_MIN 0x10
static int debug; /* 1 for basic, 2 for detailed trace */
struct cvmx_pko3_dq {
unsigned dq_count : 6; /* Number of descriptor queues */
unsigned dq_base : 10; /* Descriptor queue start number */
#define CVMX_PKO3_SWIZZLE_IPD 0x0
};
/*
* @INTERNAL
* Descriptor Queue to IPD port mapping table.
*
* This pointer is per-core, contains the virtual address
* of a global named block which has 2^12 entries per each
* possible node.
*/
struct cvmx_pko3_dq *__cvmx_pko3_dq_table;
int cvmx_pko3_get_queue_base(int ipd_port)
{
struct cvmx_pko3_dq *dq_table;
int ret = -1;
unsigned int i;
struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
/* set up the global table on first use */
if (cvmx_unlikely(!__cvmx_pko3_dq_table))
__cvmx_pko3_dq_table_setup();
i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
/* get per-node table */
dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
if (cvmx_likely(dq_table[i].dq_count > 0))
ret = xp.node << 10 | dq_table[i].dq_base;
else if (debug)
cvmx_printf("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
ipd_port);
return ret;
}
int cvmx_pko3_get_queue_num(int ipd_port)
{
struct cvmx_pko3_dq *dq_table;
int ret = -1;
unsigned int i;
struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
/* set up the global table on first use */
if (cvmx_unlikely(!__cvmx_pko3_dq_table))
__cvmx_pko3_dq_table_setup();
i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
/* get per-node table */
dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
if (cvmx_likely(dq_table[i].dq_count > 0))
ret = dq_table[i].dq_count;
else if (debug)
debug("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
ipd_port);
return ret;
}
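/*
* Example (illustrative sketch, not part of the original code): retrieve
* the DQ range registered for an IPD port before opening or using it.
* The returned base encodes the node in bits 10 and up, so it can be
* used directly as the extended DQ number expected by the PKO3 API.
*
* int base = cvmx_pko3_get_queue_base(ipd_port);
* int count = cvmx_pko3_get_queue_num(ipd_port);
*
* if (base < 0 || count <= 0)
* return;
*/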
/**
* @INTERNAL
*
* Initialize port/dq table contents
*/
static void __cvmx_pko3_dq_table_init(void *ptr)
{
unsigned int size = sizeof(struct cvmx_pko3_dq) *
CVMX_PKO3_IPD_NUM_MAX * CVMX_MAX_NODES;
memset(ptr, 0, size);
}
/**
* @INTERNAL
*
* Find or allocate global port/dq map table
* which is a named table, contains entries for
* all possible OCI nodes.
*
* The table global pointer is stored in core-local variable
* so that every core will call this function once, on first use.
*/
int __cvmx_pko3_dq_table_setup(void)
{
void *ptr;
ptr = cvmx_bootmem_alloc_named_range_once(
/* size */
sizeof(struct cvmx_pko3_dq) * CVMX_PKO3_IPD_NUM_MAX *
CVMX_MAX_NODES,
/* min_addr, max_addr, align */
0ull, 0ull, sizeof(struct cvmx_pko3_dq),
/* name */
"cvmx_pko3_global_dq_table", __cvmx_pko3_dq_table_init);
if (debug)
debug("%s: dq_table_ptr=%p\n", __func__, ptr);
if (!ptr)
return -1;
__cvmx_pko3_dq_table = ptr;
return 0;
}
/*
* @INTERNAL
* Register a range of Descriptor Queues with an interface port
*
* This function populates the DQ-to-IPD translation table
* used by the application to retrieve the DQ range (typically ordered
* by priority) for a given IPD-port, which is either a physical port,
* or a channel on a channelized interface (i.e. ILK).
*
* @param xiface is the physical interface number
* @param index is either a physical port on an interface
* or a channel of an ILK interface
* @param dq_base is the first Descriptor Queue number in a consecutive range
* @param dq_count is the number of consecutive Descriptor Queues serving
* the same channel or port.
*
* Only a consecutive range of Descriptor Queues can be associated with any
* given channel/port, and usually they are ordered from most to least
* in terms of scheduling priority.
*
* Note: this function only populates the node-local translation table.
* NOTE: This function would be cleaner if it had a single ipd_port argument
*
* @returns 0 on success, -1 on failure.
*/
int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base,
unsigned int dq_count)
{
struct cvmx_pko3_dq *dq_table;
int ipd_port;
unsigned int i;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
struct cvmx_xport xp;
if (__cvmx_helper_xiface_is_null(xiface)) {
ipd_port = cvmx_helper_node_to_ipd_port(xi.node,
CVMX_PKO3_IPD_PORT_NULL);
} else {
int p;
p = cvmx_helper_get_ipd_port(xiface, index);
if (p < 0) {
cvmx_printf("ERROR: %s: xiface %#x has no IPD port\n",
__func__, xiface);
return -1;
}
ipd_port = p;
}
xp = cvmx_helper_ipd_port_to_xport(ipd_port);
i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
/* get per-node table */
if (!__cvmx_pko3_dq_table)
__cvmx_pko3_dq_table_setup();
dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xi.node;
if (debug)
debug("%s: ipd_port=%#x ix=%#x dq %u cnt %u\n", __func__,
ipd_port, i, dq_base, dq_count);
/* Check the IPD port has not already been configured */
if (dq_table[i].dq_count > 0) {
cvmx_printf("%s: ERROR: IPD %#x already registered\n", __func__,
ipd_port);
return -1;
}
/* Store DQ# range in the queue lookup table */
dq_table[i].dq_base = dq_base;
dq_table[i].dq_count = dq_count;
return 0;
}
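/*
* Example (illustrative sketch, not part of the original code): helper
* code would typically register a consecutive range of DQs, e.g. eight
* queues starting at dq_base for port index 0 of an interface, which can
* later be looked up with cvmx_pko3_get_queue_base()/_num():
*
* __cvmx_pko3_ipd_dq_register(xiface, 0, dq_base, 8);
*/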
/*
* @INTERNAL
* Convert normal CHAN_E (i.e. IPD port) value to compressed channel form
* that is used to populate PKO_LUT.
*
* Note: This code may be model specific.
*/
static int cvmx_pko3_chan_2_xchan(uint16_t ipd_port)
{
u16 xchan;
u8 off;
static const u8 *xchan_base;
static const u8 xchan_base_cn78xx[16] = {
/* IPD 0x000 */ 0x3c0 >> 4, /* LBK */
/* IPD 0x100 */ 0x380 >> 4, /* DPI */
/* IPD 0x200 */ 0xfff >> 4, /* not used */
/* IPD 0x300 */ 0xfff >> 4, /* not used */
/* IPD 0x400 */ 0x000 >> 4, /* ILK0 */
/* IPD 0x500 */ 0x100 >> 4, /* ILK1 */
/* IPD 0x600 */ 0xfff >> 4, /* not used */
/* IPD 0x700 */ 0xfff >> 4, /* not used */
/* IPD 0x800 */ 0x200 >> 4, /* BGX0 */
/* IPD 0x900 */ 0x240 >> 4, /* BGX1 */
/* IPD 0xa00 */ 0x280 >> 4, /* BGX2 */
/* IPD 0xb00 */ 0x2c0 >> 4, /* BGX3 */
/* IPD 0xc00 */ 0x300 >> 4, /* BGX4 */
/* IPD 0xd00 */ 0x340 >> 4, /* BGX5 */
/* IPD 0xe00 */ 0xfff >> 4, /* not used */
/* IPD 0xf00 */ 0xfff >> 4 /* not used */
};
static const u8 xchan_base_cn73xx[16] = {
/* IPD 0x000 */ 0x0c0 >> 4, /* LBK */
/* IPD 0x100 */ 0x100 >> 4, /* DPI */
/* IPD 0x200 */ 0xfff >> 4, /* not used */
/* IPD 0x300 */ 0xfff >> 4, /* not used */
/* IPD 0x400 */ 0xfff >> 4, /* not used */
/* IPD 0x500 */ 0xfff >> 4, /* not used */
/* IPD 0x600 */ 0xfff >> 4, /* not used */
/* IPD 0x700 */ 0xfff >> 4, /* not used */
/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
/* IPD 0x900 */ 0x040 >> 4, /* BGX1 */
/* IPD 0xa00 */ 0x080 >> 4, /* BGX2 */
/* IPD 0xb00 */ 0xfff >> 4, /* not used */
/* IPD 0xc00 */ 0xfff >> 4, /* not used */
/* IPD 0xd00 */ 0xfff >> 4, /* not used */
/* IPD 0xe00 */ 0xfff >> 4, /* not used */
/* IPD 0xf00 */ 0xfff >> 4 /* not used */
};
static const u8 xchan_base_cn75xx[16] = {
/* IPD 0x000 */ 0x040 >> 4, /* LBK */
/* IPD 0x100 */ 0x080 >> 4, /* DPI */
/* IPD 0x200 */ 0xeee >> 4, /* SRIO0 noop */
/* IPD 0x300 */ 0xfff >> 4, /* not used */
/* IPD 0x400 */ 0xfff >> 4, /* not used */
/* IPD 0x500 */ 0xfff >> 4, /* not used */
/* IPD 0x600 */ 0xfff >> 4, /* not used */
/* IPD 0x700 */ 0xfff >> 4, /* not used */
/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
/* IPD 0x900 */ 0xfff >> 4, /* not used */
/* IPD 0xa00 */ 0xfff >> 4, /* not used */
/* IPD 0xb00 */ 0xfff >> 4, /* not used */
/* IPD 0xc00 */ 0xfff >> 4, /* not used */
/* IPD 0xd00 */ 0xfff >> 4, /* not used */
/* IPD 0xe00 */ 0xfff >> 4, /* not used */
/* IPD 0xf00 */ 0xfff >> 4 /* not used */
};
if (OCTEON_IS_MODEL(OCTEON_CN73XX))
xchan_base = xchan_base_cn73xx;
if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
xchan_base = xchan_base_cn75xx;
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
xchan_base = xchan_base_cn78xx;
if (!xchan_base)
return -1;
xchan = ipd_port >> 8;
/* ILKx, DPI has 8 bits logical channels, others just 6 */
if (((xchan & 0xfe) == 0x04) || xchan == 0x01)
off = ipd_port & 0xff;
else
off = ipd_port & 0x3f;
xchan = xchan_base[xchan & 0xf];
if (xchan == 0xff)
return -1; /* Invalid IPD_PORT */
else if (xchan == 0xee)
return -2; /* LUT not used */
else
return (xchan << 4) | off;
}
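/*
* Worked example (illustrative, not in the original source) using the
* CN73XX table above: IPD port 0x902 (BGX1, channel 2) gives
* xchan_base[0x9] = 0x040 >> 4 = 4 and off = 0x902 & 0x3f = 2, so the
* compressed channel is (4 << 4) | 2 = 0x42.
*/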
/*
* Map channel number in PKO
*
* @param node is to specify the node to which this configuration is applied.
* @param pq_num specifies the Port Queue (i.e. L1) queue number.
* @param l2_l3_q_num specifies L2/L3 queue number.
* @param channel specifies the channel number to map to the queue.
*
* The channel assignment applies to L2 or L3 Shaper Queues depending
* on the setting of channel credit level.
*
* @return returns none.
*/
void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num,
unsigned int l2_l3_q_num, uint16_t channel)
{
union cvmx_pko_l3_l2_sqx_channel sqx_channel;
cvmx_pko_lutx_t lutx;
int xchan;
sqx_channel.u64 =
csr_rd_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num));
sqx_channel.s.cc_channel = channel;
csr_wr_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num),
sqx_channel.u64);
/* Convert CHAN_E into compressed channel */
xchan = cvmx_pko3_chan_2_xchan(channel);
if (debug)
debug("%s: ipd_port=%#x xchan=%#x\n", __func__, channel, xchan);
if (xchan < 0) {
if (xchan == -1)
cvmx_printf("%s: ERROR: channel %#x not recognized\n",
__func__, channel);
return;
}
lutx.u64 = 0;
lutx.s.valid = 1;
lutx.s.pq_idx = pq_num;
lutx.s.queue_number = l2_l3_q_num;
csr_wr_node(node, CVMX_PKO_LUTX(xchan), lutx.u64);
if (debug)
debug("%s: channel %#x (compressed=%#x) mapped L2/L3 SQ=%u, PQ=%u\n",
__func__, channel, xchan, l2_l3_q_num, pq_num);
}
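/*
* Example (illustrative sketch, not part of the original code): map CHAN_E
* 0x902 to L2/L3 SQ 10 under port queue 1 on node 0; the queue number is
* interpreted at the level selected by cvmx_pko3_channel_credit_level():
*
* cvmx_pko3_map_channel(0, 1, 10, 0x902);
*/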
/*
* @INTERNAL
* This function configures port queue scheduling and topology parameters
* in hardware.
*
* @param node is to specify the node to which this configuration is applied.
* @param port_queue is the port queue number to be configured.
* @param mac_num is the mac number of the mac that will be tied to this port_queue.
*/
static void cvmx_pko_configure_port_queue(int node, int port_queue, int mac_num)
{
cvmx_pko_l1_sqx_topology_t pko_l1_topology;
cvmx_pko_l1_sqx_shape_t pko_l1_shape;
cvmx_pko_l1_sqx_link_t pko_l1_link;
pko_l1_topology.u64 = 0;
pko_l1_topology.s.link = mac_num;
csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(port_queue),
pko_l1_topology.u64);
pko_l1_shape.u64 = 0;
pko_l1_shape.s.link = mac_num;
csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(port_queue), pko_l1_shape.u64);
pko_l1_link.u64 = 0;
pko_l1_link.s.link = mac_num;
csr_wr_node(node, CVMX_PKO_L1_SQX_LINK(port_queue), pko_l1_link.u64);
}
/*
* @INTERNAL
* This function configures level 2 queues scheduling and topology parameters
* in hardware.
*
* @param node is to specify the node to which this configuration is applied.
* @param queue is the level2 queue number to be configured.
* @param parent_queue is the parent queue at next level for this l2 queue.
* @param prio is this queue's priority in parent's scheduler.
* @param rr_quantum is this queue's round robin quantum value.
* @param child_base is the first child queue number in the static priority children.
* @param child_rr_prio is the round robin children priority.
*/
static void cvmx_pko_configure_l2_queue(int node, int queue, int parent_queue,
int prio, int rr_quantum,
int child_base, int child_rr_prio)
{
cvmx_pko_l2_sqx_schedule_t pko_sq_sched;
cvmx_pko_l2_sqx_topology_t pko_child_topology;
cvmx_pko_l1_sqx_topology_t pko_parent_topology;
/* parent topology configuration */
pko_parent_topology.u64 =
csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue));
pko_parent_topology.s.prio_anchor = child_base;
pko_parent_topology.s.rr_prio = child_rr_prio;
csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue),
pko_parent_topology.u64);
if (debug > 1)
debug("CVMX_PKO_L1_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
parent_queue, pko_parent_topology.s.prio_anchor,
pko_parent_topology.s.link);
/* scheduler configuration for this sq in the parent queue */
pko_sq_sched.u64 = 0;
pko_sq_sched.s.prio = prio;
pko_sq_sched.s.rr_quantum = rr_quantum;
csr_wr_node(node, CVMX_PKO_L2_SQX_SCHEDULE(queue), pko_sq_sched.u64);
/* child topology configuration */
pko_child_topology.u64 = 0;
pko_child_topology.s.parent = parent_queue;
csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(queue),
pko_child_topology.u64);
}
/*
* @INTERNAL
* This function configures level 3 queues scheduling and topology parameters
* in hardware.
*
* @param node is to specify the node to which this configuration is applied.
* @param queue is the level3 queue number to be configured.
* @param parent_queue is the parent queue at next level for this l3 queue.
* @param prio is this queue's priority in parent's scheduler.
* @param rr_quantum is this queue's round robin quantum value.
* @param child_base is the first child queue number in the static priority children.
* @param child_rr_prio is the round robin children priority.
*/
static void cvmx_pko_configure_l3_queue(int node, int queue, int parent_queue,
int prio, int rr_quantum,
int child_base, int child_rr_prio)
{
cvmx_pko_l3_sqx_schedule_t pko_sq_sched;
cvmx_pko_l3_sqx_topology_t pko_child_topology;
cvmx_pko_l2_sqx_topology_t pko_parent_topology;
/* parent topology configuration */
pko_parent_topology.u64 =
csr_rd_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue));
pko_parent_topology.s.prio_anchor = child_base;
pko_parent_topology.s.rr_prio = child_rr_prio;
csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue),
pko_parent_topology.u64);
if (debug > 1)
debug("CVMX_PKO_L2_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
parent_queue, pko_parent_topology.s.prio_anchor,
pko_parent_topology.s.parent);
/* scheduler configuration for this sq in the parent queue */
pko_sq_sched.u64 = 0;
pko_sq_sched.s.prio = prio;
pko_sq_sched.s.rr_quantum = rr_quantum;
csr_wr_node(node, CVMX_PKO_L3_SQX_SCHEDULE(queue), pko_sq_sched.u64);
/* child topology configuration */
pko_child_topology.u64 = 0;
pko_child_topology.s.parent = parent_queue;
csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(queue),
pko_child_topology.u64);
}
/*
* @INTERNAL
* This function configures level 4 queues scheduling and topology parameters
* in hardware.
*
* @param node is to specify the node to which this configuration is applied.
* @param queue is the level4 queue number to be configured.
* @param parent_queue is the parent queue at next level for this l4 queue.
* @param prio is this queue's priority in parent's scheduler.
* @param rr_quantum is this queue's round robin quantum value.
* @param child_base is the first child queue number in the static priority children.
* @param child_rr_prio is the round robin children priority.
*/
static void cvmx_pko_configure_l4_queue(int node, int queue, int parent_queue,
int prio, int rr_quantum,
int child_base, int child_rr_prio)
{
cvmx_pko_l4_sqx_schedule_t pko_sq_sched;
cvmx_pko_l4_sqx_topology_t pko_child_topology;
cvmx_pko_l3_sqx_topology_t pko_parent_topology;
/* parent topology configuration */
pko_parent_topology.u64 =
csr_rd_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue));
pko_parent_topology.s.prio_anchor = child_base;
pko_parent_topology.s.rr_prio = child_rr_prio;
csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue),
pko_parent_topology.u64);
if (debug > 1)
debug("CVMX_PKO_L3_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
parent_queue, pko_parent_topology.s.prio_anchor,
pko_parent_topology.s.parent);
/* scheduler configuration for this sq in the parent queue */
pko_sq_sched.u64 = 0;
pko_sq_sched.s.prio = prio;
pko_sq_sched.s.rr_quantum = rr_quantum;
csr_wr_node(node, CVMX_PKO_L4_SQX_SCHEDULE(queue), pko_sq_sched.u64);
/* topology configuration */
pko_child_topology.u64 = 0;
pko_child_topology.s.parent = parent_queue;
csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(queue),
pko_child_topology.u64);
}
/*
* @INTERNAL
* This function configures level 5 queues scheduling and topology parameters
* in hardware.
*
* @param node is to specify the node to which this configuration is applied.
* @param queue is the level5 queue number to be configured.
* @param parent_queue is the parent queue at next level for this l5 queue.
* @param prio is this queue's priority in parent's scheduler.
* @param rr_quantum is this queue's round robin quantum value.
* @param child_base is the first child queue number in the static priority children.
* @param child_rr_prio is the round robin children priority.
*/
static void cvmx_pko_configure_l5_queue(int node, int queue, int parent_queue,
int prio, int rr_quantum,
int child_base, int child_rr_prio)
{
cvmx_pko_l5_sqx_schedule_t pko_sq_sched;
cvmx_pko_l4_sqx_topology_t pko_parent_topology;
cvmx_pko_l5_sqx_topology_t pko_child_topology;
/* parent topology configuration */
pko_parent_topology.u64 =
csr_rd_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue));
pko_parent_topology.s.prio_anchor = child_base;
pko_parent_topology.s.rr_prio = child_rr_prio;
csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue),
pko_parent_topology.u64);
if (debug > 1)
debug("CVMX_PKO_L4_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
parent_queue, pko_parent_topology.s.prio_anchor,
pko_parent_topology.s.parent);
/* scheduler configuration for this sq in the parent queue */
pko_sq_sched.u64 = 0;
pko_sq_sched.s.prio = prio;
pko_sq_sched.s.rr_quantum = rr_quantum;
csr_wr_node(node, CVMX_PKO_L5_SQX_SCHEDULE(queue), pko_sq_sched.u64);
/* topology configuration */
pko_child_topology.u64 = 0;
pko_child_topology.s.parent = parent_queue;
csr_wr_node(node, CVMX_PKO_L5_SQX_TOPOLOGY(queue),
pko_child_topology.u64);
}
/*
* @INTERNAL
* This function configures descriptor queues scheduling and topology parameters
* in hardware.
*
* @param node is to specify the node to which this configuration is applied.
* @param dq is the descriptor queue number to be configured.
* @param parent_queue is the parent queue at next level for this dq.
* @param prio is this queue's priority in parent's scheduler.
* @param rr_quantum is this queue's round robin quantum value.
* @param child_base is the first child queue number in the static priority children.
* @param child_rr_prio is the round robin children priority.
*/
static void cvmx_pko_configure_dq(int node, int dq, int parent_queue, int prio,
int rr_quantum, int child_base,
int child_rr_prio)
{
cvmx_pko_dqx_schedule_t pko_dq_sched;
cvmx_pko_dqx_topology_t pko_dq_topology;
cvmx_pko_l5_sqx_topology_t pko_parent_topology;
cvmx_pko_dqx_wm_ctl_t pko_dq_wm_ctl;
unsigned long long parent_topology_reg;
char lvl;
if (debug)
debug("%s: dq %u parent %u child_base %u\n", __func__, dq,
parent_queue, child_base);
if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L5_QUEUES) {
parent_topology_reg = CVMX_PKO_L5_SQX_TOPOLOGY(parent_queue);
lvl = 5;
} else if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L3_QUEUES) {
parent_topology_reg = CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue);
lvl = 3;
} else {
return;
}
if (debug)
debug("%s: parent_topology_reg=%#llx\n", __func__,
parent_topology_reg);
/* parent topology configuration */
pko_parent_topology.u64 = csr_rd_node(node, parent_topology_reg);
pko_parent_topology.s.prio_anchor = child_base;
pko_parent_topology.s.rr_prio = child_rr_prio;
csr_wr_node(node, parent_topology_reg, pko_parent_topology.u64);
if (debug > 1)
debug("CVMX_PKO_L%d_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
lvl, parent_queue, pko_parent_topology.s.prio_anchor,
pko_parent_topology.s.parent);
/* scheduler configuration for this dq in the parent queue */
pko_dq_sched.u64 = 0;
pko_dq_sched.s.prio = prio;
pko_dq_sched.s.rr_quantum = rr_quantum;
csr_wr_node(node, CVMX_PKO_DQX_SCHEDULE(dq), pko_dq_sched.u64);
/* topology configuration */
pko_dq_topology.u64 = 0;
pko_dq_topology.s.parent = parent_queue;
csr_wr_node(node, CVMX_PKO_DQX_TOPOLOGY(dq), pko_dq_topology.u64);
/* configure for counting packets, not bytes at this level */
pko_dq_wm_ctl.u64 = 0;
pko_dq_wm_ctl.s.kind = 1;
pko_dq_wm_ctl.s.enable = 0;
csr_wr_node(node, CVMX_PKO_DQX_WM_CTL(dq), pko_dq_wm_ctl.u64);
if (debug > 1) {
pko_dq_sched.u64 = csr_rd_node(node, CVMX_PKO_DQX_SCHEDULE(dq));
pko_dq_topology.u64 =
csr_rd_node(node, CVMX_PKO_DQX_TOPOLOGY(dq));
debug("CVMX_PKO_DQX_TOPOLOGY(%u)PARENT=%u CVMX_PKO_DQX_SCHEDULE(%u) PRIO=%u Q=%u\n",
dq, pko_dq_topology.s.parent, dq, pko_dq_sched.s.prio,
pko_dq_sched.s.rr_quantum);
}
}
/*
* @INTERNAL
* The following structure selects the Scheduling Queue configuration
* routine for each of the supported levels.
* The initial content of the table will be setup in accordance
* to the specific SoC model and its implemented resources
*/
struct pko3_cfg_tab_s {
/* function pointer to configure the given level, last=DQ */
struct {
u8 parent_level;
void (*cfg_sq_func)(int node, int queue, int parent_queue,
int prio, int rr_quantum, int child_base,
int child_rr_prio);
//XXX size exaggerated for debugging
} lvl[256];
};
static const struct pko3_cfg_tab_s pko3_cn78xx_cfg = {
{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
cvmx_pko_configure_l2_queue },
[CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
cvmx_pko_configure_l3_queue },
[CVMX_PKO_L4_QUEUES] = { CVMX_PKO_L3_QUEUES,
cvmx_pko_configure_l4_queue },
[CVMX_PKO_L5_QUEUES] = { CVMX_PKO_L4_QUEUES,
cvmx_pko_configure_l5_queue },
[CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L5_QUEUES,
cvmx_pko_configure_dq } }
};
static const struct pko3_cfg_tab_s pko3_cn73xx_cfg = {
{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
cvmx_pko_configure_l2_queue },
[CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
cvmx_pko_configure_l3_queue },
[CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L3_QUEUES,
cvmx_pko_configure_dq } }
};
/*
* Configure Port Queue and its children Scheduler Queue
*
* Port Queues (a.k.a L1) are assigned 1-to-1 to MACs.
* L2 Scheduler Queues are used for specifying channels, and thus there
* could be multiple L2 SQs attached to a single L1 PQ, either in a
* fair round-robin scheduling, or with static and/or round-robin priorities.
*
* @param node on which to operate
* @param mac_num is the LMAC number that is associated with the Port Queue,
* @param pq_num is the number of the L1 PQ attached to the MAC
*
* @returns 0 on success, -1 on failure.
*/
int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num,
unsigned int pq_num)
{
char b1[10];
if (debug)
debug("%s: MAC%u -> %s\n", __func__, mac_num,
__cvmx_pko3_sq_str(b1, CVMX_PKO_PORT_QUEUES, pq_num));
cvmx_pko_configure_port_queue(node, pq_num, mac_num);
return 0;
}
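/*
* Example (illustrative sketch, not part of the original code): tie
* MAC 2 to port queue 2 on node 0 during interface bring-up:
*
* cvmx_pko3_pq_config(0, 2, 2);
*/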
/*
* Configure L3 through L5 Scheduler Queues and Descriptor Queues
*
* The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are
* configured one-to-one or many-to-one to a single parent Scheduler
* Queues. The level of the parent SQ is specified in an argument,
* as well as the number of childer to attach to the specific parent.
* The children can have fair round-robin or priority-based scheduling
* when multiple children are assigned a single parent.
*
* @param node on which to operate
* @param child_level is the level of the child queue
* @param parent_queue is the number of the parent Scheduler Queue
* @param child_base is the number of the first child SQ or DQ to assign to
* @param child_count is the number of consecutive children to assign
* @param stat_prio_count is the priority setting for the children L2 SQs
*
* If <stat_prio_count> is -1, the Ln children will have equal Round-Robin
* relationship with each other. If <stat_prio_count> is 0, all Ln children
* will be arranged in Weighted-Round-Robin, with the first having the most
* precedence. If <stat_prio_count> is between 1 and 8, it indicates how
* many children will have static priority settings (with the first having
* the most precedence), with the remaining Ln children having WRR scheduling.
*
* @returns 0 on success, -1 on failure.
*
* Note: this function supports the configuration of node-local unit.
*/
int cvmx_pko3_sq_config_children(unsigned int node,
enum cvmx_pko3_level_e child_level,
unsigned int parent_queue,
unsigned int child_base,
unsigned int child_count, int stat_prio_count)
{
enum cvmx_pko3_level_e parent_level;
unsigned int num_elem = 0;
unsigned int rr_quantum, rr_count;
unsigned int child, prio, rr_prio;
const struct pko3_cfg_tab_s *cfg_tbl = NULL;
char b1[10], b2[10];
if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
num_elem = NUM_ELEMENTS(pko3_cn78xx_cfg.lvl);
cfg_tbl = &pko3_cn78xx_cfg;
}
if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
num_elem = NUM_ELEMENTS(pko3_cn73xx_cfg.lvl);
cfg_tbl = &pko3_cn73xx_cfg;
}
if (!cfg_tbl || child_level >= num_elem) {
cvmx_printf("ERROR: %s: model or level %#x invalid\n", __func__,
child_level);
return -1;
}
parent_level = cfg_tbl->lvl[child_level].parent_level;
if (!cfg_tbl->lvl[child_level].cfg_sq_func ||
cfg_tbl->lvl[child_level].parent_level == 0) {
cvmx_printf("ERROR: %s: queue level %#x invalid\n", __func__,
child_level);
return -1;
}
/* First static priority is 0 - top precedence */
prio = 0;
if (stat_prio_count > (signed int)child_count)
stat_prio_count = child_count;
/* Valid PRIO field is 0..9, limit maximum static priorities */
if (stat_prio_count > 9)
stat_prio_count = 9;
/* Special case of a single child */
if (child_count == 1) {
rr_count = 0;
rr_prio = 0xF;
/* Special case for Fair-RR */
} else if (stat_prio_count < 0) {
rr_count = child_count;
rr_prio = 0;
} else {
rr_count = child_count - stat_prio_count;
rr_prio = stat_prio_count;
}
/* Compute highest RR_QUANTUM */
if (stat_prio_count > 0)
rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN * rr_count;
else
rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN;
if (debug)
debug("%s: Parent %s child_base %u rr_pri %u\n", __func__,
__cvmx_pko3_sq_str(b1, parent_level, parent_queue),
child_base, rr_prio);
/* Parent is configured with child */
for (child = child_base; child < (child_base + child_count); child++) {
if (debug)
debug("%s: Child %s of %s prio %u rr_quantum %#x\n",
__func__,
__cvmx_pko3_sq_str(b1, child_level, child),
__cvmx_pko3_sq_str(b2, parent_level,
parent_queue),
prio, rr_quantum);
cfg_tbl->lvl[child_level].cfg_sq_func(node, child, parent_queue,
prio, rr_quantum,
child_base, rr_prio);
if (prio < rr_prio)
prio++;
else if (stat_prio_count > 0)
rr_quantum -= CVMX_PKO3_RR_QUANTUM_MIN;
} /* for child */
return 0;
}
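/*
* Example (illustrative sketch, not part of the original code): attach
* eight L2 scheduler queues (numbers 8..15) to port queue 0 on node 0,
* with the first two children as static priorities and the remaining
* six sharing weighted round-robin:
*
* cvmx_pko3_sq_config_children(0, CVMX_PKO_L2_QUEUES, 0, 8, 8, 2);
*/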

View file

@ -0,0 +1,213 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* PKO resources.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#define CVMX_GR_TAG_PKO_PORT_QUEUES(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'p', 'o', 'q', '_', \
((x) + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_PKO_L2_QUEUES(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '2', 'q', '_', \
((x) + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_PKO_L3_QUEUES(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '3', 'q', '_', \
((x) + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_PKO_L4_QUEUES(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '4', 'q', '_', \
((x) + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_PKO_L5_QUEUES(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '5', 'q', '_', \
((x) + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_PKO_DESCR_QUEUES(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'd', 'e', 'q', '_', \
((x) + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_PKO_PORT_INDEX(x) \
cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'p', 'i', 'd', '_', \
((x) + '0'), '.', '.', '.', '.')
/*
* @INTERNAL
* Per-DQ parameters, current and maximum queue depth counters
*/
cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];
static const short cvmx_pko_num_queues_78XX[256] = {
[CVMX_PKO_PORT_QUEUES] = 32, [CVMX_PKO_L2_QUEUES] = 512,
[CVMX_PKO_L3_QUEUES] = 512, [CVMX_PKO_L4_QUEUES] = 1024,
[CVMX_PKO_L5_QUEUES] = 1024, [CVMX_PKO_DESCR_QUEUES] = 1024
};
static const short cvmx_pko_num_queues_73XX[256] = {
[CVMX_PKO_PORT_QUEUES] = 16, [CVMX_PKO_L2_QUEUES] = 256,
[CVMX_PKO_L3_QUEUES] = 256, [CVMX_PKO_L4_QUEUES] = 0,
[CVMX_PKO_L5_QUEUES] = 0, [CVMX_PKO_DESCR_QUEUES] = 256
};
int cvmx_pko3_num_level_queues(enum cvmx_pko3_level_e level)
{
unsigned int nq = 0, ne = 0;
if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
ne = NUM_ELEMENTS(cvmx_pko_num_queues_78XX);
nq = cvmx_pko_num_queues_78XX[level];
}
if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
ne = NUM_ELEMENTS(cvmx_pko_num_queues_73XX);
nq = cvmx_pko_num_queues_73XX[level];
}
if (nq == 0 || level >= ne) {
printf("ERROR: %s: queue level %#x invalid\n", __func__, level);
return -1;
}
return nq;
}
static inline struct global_resource_tag
__cvmx_pko_get_queues_resource_tag(int node, enum cvmx_pko3_level_e queue_level)
{
if (cvmx_pko3_num_level_queues(queue_level) == 0) {
printf("ERROR: %s: queue level %#x invalid\n", __func__,
queue_level);
return CVMX_GR_TAG_INVALID;
}
switch (queue_level) {
case CVMX_PKO_PORT_QUEUES:
return CVMX_GR_TAG_PKO_PORT_QUEUES(node);
case CVMX_PKO_L2_QUEUES:
return CVMX_GR_TAG_PKO_L2_QUEUES(node);
case CVMX_PKO_L3_QUEUES:
return CVMX_GR_TAG_PKO_L3_QUEUES(node);
case CVMX_PKO_L4_QUEUES:
return CVMX_GR_TAG_PKO_L4_QUEUES(node);
case CVMX_PKO_L5_QUEUES:
return CVMX_GR_TAG_PKO_L5_QUEUES(node);
case CVMX_PKO_DESCR_QUEUES:
return CVMX_GR_TAG_PKO_DESCR_QUEUES(node);
default:
printf("ERROR: %s: queue level %#x invalid\n", __func__,
queue_level);
return CVMX_GR_TAG_INVALID;
}
}
/**
* Allocate or reserve a pko resource - called by wrapper functions
* @param tag processed global resource tag
* @param base_queue if specified the queue to reserve
* @param owner to be specified for resource
* @param num_queues to allocate
* @param max_num_queues for global resource
*/
int cvmx_pko_alloc_global_resource(struct global_resource_tag tag,
int base_queue, int owner, int num_queues,
int max_num_queues)
{
int res;
if (cvmx_create_global_resource_range(tag, max_num_queues)) {
debug("ERROR: Failed to create PKO3 resource: %lx-%lx\n",
(unsigned long)tag.hi, (unsigned long)tag.lo);
return -1;
}
if (base_queue >= 0) {
res = cvmx_reserve_global_resource_range(tag, owner, base_queue,
num_queues);
} else {
res = cvmx_allocate_global_resource_range(tag, owner,
num_queues, 1);
}
if (res < 0) {
debug("ERROR: Failed to %s PKO3 tag %lx:%lx, %i %i %i %i.\n",
((base_queue < 0) ? "allocate" : "reserve"),
(unsigned long)tag.hi, (unsigned long)tag.lo, base_queue,
owner, num_queues, max_num_queues);
return -1;
}
return res;
}
/**
* Allocate or reserve PKO queues - wrapper for cvmx_pko_alloc_global_resource
*
* @param node on which to allocate/reserve PKO queues
* @param level of PKO queue
* @param owner of reserved/allocated resources
* @param base_queue to start reservation/allocation
* @param num_queues number of queues to be allocated
* @return 0 on success, -1 on failure
*/
int cvmx_pko_alloc_queues(int node, int level, int owner, int base_queue,
int num_queues)
{
struct global_resource_tag tag =
__cvmx_pko_get_queues_resource_tag(node, level);
int max_num_queues = cvmx_pko3_num_level_queues(level);
return cvmx_pko_alloc_global_resource(tag, base_queue, owner,
num_queues, max_num_queues);
}
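/*
* Example (illustrative sketch, not part of the original code; owner is a
* caller-supplied resource owner identifier): allocate eight descriptor
* queues anywhere in the DQ space on node 0, or reserve a specific range
* by passing a non-negative base_queue instead of -1:
*
* int dq_base = cvmx_pko_alloc_queues(0, CVMX_PKO_DESCR_QUEUES,
* owner, -1, 8);
*/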
/**
* @INTERNAL
*
* Initialize the pointer to the descriptor queue parameter table.
* The table is one named block per node, and may be shared between
* applications.
*/
int __cvmx_pko3_dq_param_setup(unsigned int node)
{
return 0;
}

View file

@ -0,0 +1,786 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
static const int debug;
#define CVMX_DUMP_REGX(reg) \
if (debug) \
debug("%s=%#llx\n", #reg, (long long)csr_rd_node(node, reg))
static int cvmx_pko_setup_macs(int node);
/*
* PKO descriptor queue operation error string
*
* @param dqstatus is the enumeration returned from hardware,
* PKO_QUERY_RTN_S[DQSTATUS].
*
* @return static constant string error description
*/
const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus)
{
char *str = "PKO Undefined error";
switch (dqstatus) {
case PKO_DQSTATUS_PASS:
str = "No error";
break;
case PKO_DQSTATUS_BADSTATE:
str = "PKO queue not ready";
break;
case PKO_DQSTATUS_NOFPABUF:
str = "PKO failed to allocate buffer from FPA";
break;
case PKO_DQSTATUS_NOPKOBUF:
str = "PKO out of buffers";
break;
case PKO_DQSTATUS_FAILRTNPTR:
str = "PKO failed to return buffer to FPA";
break;
case PKO_DQSTATUS_ALREADY:
str = "PKO queue already opened";
break;
case PKO_DQSTATUS_NOTCREATED:
str = "PKO queue has not been created";
break;
case PKO_DQSTATUS_NOTEMPTY:
str = "PKO queue is not empty";
break;
case PKO_DQSTATUS_SENDPKTDROP:
str = "Illegal PKO command construct";
break;
}
return str;
}
/*
* PKO global initialization for 78XX.
*
* @param node is the node on which PKO block is initialized.
* @return none.
*/
int cvmx_pko3_hw_init_global(int node, uint16_t aura)
{
cvmx_pko_dpfi_flush_t pko_flush;
cvmx_pko_dpfi_fpa_aura_t pko_aura;
cvmx_pko_dpfi_ena_t dpfi_enable;
cvmx_pko_ptf_iobp_cfg_t ptf_iobp_cfg;
cvmx_pko_pdm_cfg_t pko_pdm_cfg;
cvmx_pko_enable_t pko_enable;
cvmx_pko_dpfi_status_t dpfi_status;
cvmx_pko_status_t pko_status;
cvmx_pko_shaper_cfg_t shaper_cfg;
u64 cycles;
const unsigned int timeout = 100; /* 100 milliseconds */
if (node != (aura >> 10))
cvmx_printf("WARNING: AURA vs PKO node mismatch\n");
pko_enable.u64 = csr_rd_node(node, CVMX_PKO_ENABLE);
if (pko_enable.s.enable) {
cvmx_printf("WARNING: %s: PKO already enabled on node %u\n",
__func__, node);
return 0;
}
/* Enable color awareness. */
shaper_cfg.u64 = csr_rd_node(node, CVMX_PKO_SHAPER_CFG);
shaper_cfg.s.color_aware = 1;
csr_wr_node(node, CVMX_PKO_SHAPER_CFG, shaper_cfg.u64);
/* Clear FLUSH command to be sure */
pko_flush.u64 = 0;
pko_flush.s.flush_en = 0;
csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);
/* set the aura number in pko, use aura node from parameter */
pko_aura.u64 = 0;
pko_aura.s.node = aura >> 10;
pko_aura.s.laura = aura;
csr_wr_node(node, CVMX_PKO_DPFI_FPA_AURA, pko_aura.u64);
CVMX_DUMP_REGX(CVMX_PKO_DPFI_FPA_AURA);
dpfi_enable.u64 = 0;
dpfi_enable.s.enable = 1;
csr_wr_node(node, CVMX_PKO_DPFI_ENA, dpfi_enable.u64);
/* Prepare timeout */
cycles = get_timer(0);
/* Wait until all pointers have been returned */
do {
pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
if (get_timer(cycles) > timeout)
break;
} while (!pko_status.s.pko_rdy);
if (!pko_status.s.pko_rdy) {
dpfi_status.u64 = csr_rd_node(node, CVMX_PKO_DPFI_STATUS);
cvmx_printf("ERROR: %s: PKO DFPI failed, PKO_STATUS=%#llx DPFI_STATUS=%#llx\n",
__func__, (unsigned long long)pko_status.u64,
(unsigned long long)dpfi_status.u64);
return -1;
}
/* Set max outstanding requests in IOBP for any FIFO.*/
ptf_iobp_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTF_IOBP_CFG);
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
ptf_iobp_cfg.s.max_read_size = 0x10; /* Recommended by HRM.*/
else
/* Reduce the value from recommended 0x10 to avoid
* getting "underflow" condition in the BGX TX FIFO.
*/
ptf_iobp_cfg.s.max_read_size = 3;
csr_wr_node(node, CVMX_PKO_PTF_IOBP_CFG, ptf_iobp_cfg.u64);
/* Set minimum packet size per Ethernet standard */
pko_pdm_cfg.u64 = 0;
pko_pdm_cfg.s.pko_pad_minlen = 0x3c; /* 60 bytes before FCS */
csr_wr_node(node, CVMX_PKO_PDM_CFG, pko_pdm_cfg.u64);
/* Initialize MACs and FIFOs */
cvmx_pko_setup_macs(node);
/* enable PKO, although interfaces and queues are not up yet */
pko_enable.u64 = 0;
pko_enable.s.enable = 1;
csr_wr_node(node, CVMX_PKO_ENABLE, pko_enable.u64);
/* PKO_RDY set indicates successful initialization */
pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
if (pko_status.s.pko_rdy)
return 0;
cvmx_printf("ERROR: %s: failed, PKO_STATUS=%#llx\n", __func__,
(unsigned long long)pko_status.u64);
return -1;
}
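/*
* Illustrative sketch (not part of the original source): the aura
* argument packs the node number into its upper bits, matching the
* "node != (aura >> 10)" check above. Assuming a 10-bit local aura
* number, local aura 5 on node 1 would be passed as:
*
*	uint16_t aura = (1 << 10) | 5;
*
*	if (cvmx_pko3_hw_init_global(1, aura))
*		debug("PKO global initialization failed\n");
*/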
/*
* Configure Channel credit level in PKO.
*
* @param node is to specify the node to which this configuration is applied.
* @param level specifies the level at which pko channel queues will be configured.
* @return returns 0 if successful and -1 on failure.
*/
int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level)
{
union cvmx_pko_channel_level channel_level;
channel_level.u64 = 0;
if (level == CVMX_PKO_L2_QUEUES)
channel_level.s.cc_level = 0;
else if (level == CVMX_PKO_L3_QUEUES)
channel_level.s.cc_level = 1;
else
return -1;
csr_wr_node(node, CVMX_PKO_CHANNEL_LEVEL, channel_level.u64);
return 0;
}
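/*
* Illustrative usage (not part of the original source): the caller picks
* the level matching where channel queues were attached in the PKO
* scheduler tree, e.g. for channel queues at level 2:
*
*	if (cvmx_pko3_channel_credit_level(node, CVMX_PKO_L2_QUEUES))
*		debug("invalid PKO channel credit level\n");
*/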
/** Open configured descriptor queues before queueing packets into them.
*
* @param node is to specify the node to which this configuration is applied.
* @param dq is the descriptor queue number to be opened.
* @return returns 0 on success or -1 on failure.
*/
int cvmx_pko_dq_open(int node, int dq)
{
cvmx_pko_query_rtn_t pko_status;
pko_query_dqstatus_t dqstatus;
cvmx_pko3_dq_params_t *p_param;
if (debug)
debug("%s: DEBUG: dq %u\n", __func__, dq);
__cvmx_pko3_dq_param_setup(node);
pko_status = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_OPEN);
dqstatus = pko_status.s.dqstatus;
if (dqstatus == PKO_DQSTATUS_ALREADY)
return 0;
if (dqstatus != PKO_DQSTATUS_PASS) {
cvmx_printf("%s: ERROR: Failed to open dq :%u: %s\n", __func__,
dq, pko_dqstatus_error(dqstatus));
return -1;
}
/* Setup the descriptor queue software parameters */
p_param = cvmx_pko3_dq_parameters(node, dq);
if (p_param) {
p_param->depth = pko_status.s.depth;
if (p_param->limit == 0)
p_param->limit = 1024; /* last-resort default */
}
return 0;
}
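/*
* Illustrative usage (not part of the original source; the node and dq
* values here are hypothetical): each descriptor queue must be opened
* once before the first packet is queued to it:
*
*	int node = 0, dq = 0;
*
*	if (cvmx_pko_dq_open(node, dq) != 0)
*		debug("cannot open PKO DQ %d\n", dq);
*/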
/*
* PKO initialization of MACs and FIFOs
*
* All MACs are configured and assigned a specific FIFO,
* and each FIFO is sized for the best utilization of the
* available FIFO resources.
*
* @param node specifies which node's PKO block to set up.
* @return returns 0 if successful and -1 on failure.
*
* Note: This function contains model-specific code.
*/
static int cvmx_pko_setup_macs(int node)
{
unsigned int interface;
unsigned int port, num_ports;
unsigned int mac_num, fifo, pri, cnt;
cvmx_helper_interface_mode_t mode;
const unsigned int num_interfaces =
cvmx_helper_get_number_of_interfaces();
u8 fifo_group_cfg[8];
u8 fifo_group_spd[8];
unsigned int fifo_count = 0;
unsigned int max_fifos = 0, fifo_groups = 0;
struct {
u8 fifo_cnt;
u8 fifo_id;
u8 pri;
u8 spd;
u8 mac_fifo_cnt;
} cvmx_pko3_mac_table[32];
if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
max_fifos = 28; /* exclusive of NULL FIFO */
fifo_groups = 8; /* inclusive of NULL PTGF */
}
if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
max_fifos = 16;
fifo_groups = 5;
}
/* Initialize FIFO allocation table */
memset(&fifo_group_cfg, 0, sizeof(fifo_group_cfg));
memset(&fifo_group_spd, 0, sizeof(fifo_group_spd));
memset(cvmx_pko3_mac_table, 0, sizeof(cvmx_pko3_mac_table));
/* Initialize all MACs as disabled */
for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
cvmx_pko3_mac_table[mac_num].pri = 0;
cvmx_pko3_mac_table[mac_num].fifo_cnt = 0;
cvmx_pko3_mac_table[mac_num].fifo_id = 0x1f;
}
for (interface = 0; interface < num_interfaces; interface++) {
int xiface =
cvmx_helper_node_interface_to_xiface(node, interface);
/* Interface type for ALL interfaces */
mode = cvmx_helper_interface_get_mode(xiface);
num_ports = cvmx_helper_interface_enumerate(xiface);
if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
continue;
/*
* Non-BGX interfaces:
* Each of these interfaces has a single MAC really.
*/
if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
num_ports = 1;
for (port = 0; port < num_ports; port++) {
int i;
/* Get the per-port mode for BGX-interfaces */
if (interface < CVMX_HELPER_MAX_GMX)
mode = cvmx_helper_bgx_get_mode(xiface, port);
/* In MIXED mode, LMACs can run different protocols */
/* convert interface/port to mac number */
i = __cvmx_pko3_get_mac_num(xiface, port);
if (i < 0 || i >= (int)__cvmx_pko3_num_macs()) {
cvmx_printf("%s: ERROR: interface %d:%u port %d has no MAC %d/%d\n",
__func__, node, interface, port, i,
__cvmx_pko3_num_macs());
continue;
}
if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
unsigned int bgx_fifo_size =
__cvmx_helper_bgx_fifo_size(xiface,
port);
cvmx_pko3_mac_table[i].mac_fifo_cnt =
bgx_fifo_size /
(CVMX_BGX_TX_FIFO_SIZE / 4);
cvmx_pko3_mac_table[i].pri = 2;
cvmx_pko3_mac_table[i].spd = 10;
cvmx_pko3_mac_table[i].fifo_cnt = 2;
} else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) {
unsigned int bgx_fifo_size =
__cvmx_helper_bgx_fifo_size(xiface,
port);
cvmx_pko3_mac_table[i].mac_fifo_cnt =
bgx_fifo_size /
(CVMX_BGX_TX_FIFO_SIZE / 4);
cvmx_pko3_mac_table[i].pri = 4;
cvmx_pko3_mac_table[i].spd = 40;
cvmx_pko3_mac_table[i].fifo_cnt = 4;
} else if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
unsigned int bgx_fifo_size =
__cvmx_helper_bgx_fifo_size(xiface,
port);
cvmx_pko3_mac_table[i].mac_fifo_cnt =
bgx_fifo_size /
(CVMX_BGX_TX_FIFO_SIZE / 4);
cvmx_pko3_mac_table[i].pri = 3;
cvmx_pko3_mac_table[i].fifo_cnt = 4;
/* DXAUI at 20G, or XAUI at 10G */
cvmx_pko3_mac_table[i].spd = 20;
} else if (mode == CVMX_HELPER_INTERFACE_MODE_XFI) {
unsigned int bgx_fifo_size =
__cvmx_helper_bgx_fifo_size(xiface,
port);
cvmx_pko3_mac_table[i].mac_fifo_cnt =
bgx_fifo_size /
(CVMX_BGX_TX_FIFO_SIZE / 4);
cvmx_pko3_mac_table[i].pri = 3;
cvmx_pko3_mac_table[i].fifo_cnt = 4;
cvmx_pko3_mac_table[i].spd = 10;
} else if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
cvmx_pko3_mac_table[i].fifo_cnt = 1;
cvmx_pko3_mac_table[i].pri = 1;
cvmx_pko3_mac_table[i].spd = 1;
cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
} else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
mode == CVMX_HELPER_INTERFACE_MODE_SRIO) {
cvmx_pko3_mac_table[i].fifo_cnt = 4;
cvmx_pko3_mac_table[i].pri = 3;
/* ILK/SRIO: speed depends on lane count */
cvmx_pko3_mac_table[i].spd = 40;
cvmx_pko3_mac_table[i].mac_fifo_cnt = 4;
} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
cvmx_pko3_mac_table[i].fifo_cnt = 4;
cvmx_pko3_mac_table[i].pri = 2;
/* Actual speed depends on PCIe lanes/mode */
cvmx_pko3_mac_table[i].spd = 50;
/* SLI Tx FIFO size to be revisited */
cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
} else {
/* Other BGX interface modes: SGMII/RGMII */
unsigned int bgx_fifo_size =
__cvmx_helper_bgx_fifo_size(xiface,
port);
cvmx_pko3_mac_table[i].mac_fifo_cnt =
bgx_fifo_size /
(CVMX_BGX_TX_FIFO_SIZE / 4);
cvmx_pko3_mac_table[i].fifo_cnt = 1;
cvmx_pko3_mac_table[i].pri = 1;
cvmx_pko3_mac_table[i].spd = 1;
}
if (debug)
debug("%s: intf %d:%u port %u %s mac %02u cnt %u macfifo %uk spd %u\n",
__func__, node, interface, port,
cvmx_helper_interface_mode_to_string(mode),
i, cvmx_pko3_mac_table[i].fifo_cnt,
cvmx_pko3_mac_table[i].mac_fifo_cnt * 8,
cvmx_pko3_mac_table[i].spd);
} /* for port */
} /* for interface */
/* Count the number of requested FIFOs */
for (fifo_count = mac_num = 0; mac_num < __cvmx_pko3_num_macs();
mac_num++)
fifo_count += cvmx_pko3_mac_table[mac_num].fifo_cnt;
if (debug)
debug("%s: initially requested FIFO count %u\n", __func__,
fifo_count);
/* Heuristically trim FIFO count to fit in available number */
pri = 1;
cnt = 4;
while (fifo_count > max_fifos) {
for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
if (cvmx_pko3_mac_table[mac_num].fifo_cnt == cnt &&
cvmx_pko3_mac_table[mac_num].pri <= pri) {
cvmx_pko3_mac_table[mac_num].fifo_cnt >>= 1;
fifo_count -=
cvmx_pko3_mac_table[mac_num].fifo_cnt;
}
if (fifo_count <= max_fifos)
break;
}
if (pri >= 4) {
pri = 1;
cnt >>= 1;
} else {
pri++;
}
if (cnt == 0)
break;
}
if (debug)
debug("%s: adjusted FIFO count %u\n", __func__, fifo_count);
/* Special case for NULL Virtual FIFO */
fifo_group_cfg[fifo_groups - 1] = 0;
/* there is no MAC connected to NULL FIFO */
/* Configure MAC units, and attach a FIFO to each */
for (fifo = 0, cnt = 4; cnt > 0; cnt >>= 1) {
unsigned int g;
for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
if (cvmx_pko3_mac_table[mac_num].fifo_cnt < cnt ||
cvmx_pko3_mac_table[mac_num].fifo_id != 0x1f)
continue;
/* Attach FIFO to MAC */
cvmx_pko3_mac_table[mac_num].fifo_id = fifo;
g = fifo >> 2;
/* Sum speed for FIFO group */
fifo_group_spd[g] += cvmx_pko3_mac_table[mac_num].spd;
if (cnt == 4)
fifo_group_cfg[g] = 4; /* 10k,0,0,0 */
else if (cnt == 2 && (fifo & 0x3) == 0)
fifo_group_cfg[g] = 3; /* 5k,0,5k,0 */
else if (cnt == 2 && fifo_group_cfg[g] == 3)
/* no change */;
else if (cnt == 1 && (fifo & 0x2) &&
fifo_group_cfg[g] == 3)
fifo_group_cfg[g] = 1; /* 5k,0,2.5k 2.5k*/
else if (cnt == 1 && (fifo & 0x3) == 0x3)
/* no change */;
else if (cnt == 1)
fifo_group_cfg[g] = 0; /* 2.5k x 4 */
else
cvmx_printf("ERROR: %s: internal error\n",
__func__);
fifo += cnt;
}
}
/* Check if there was no error in FIFO allocation */
if (fifo > max_fifos) {
cvmx_printf("ERROR: %s: Internal error FIFO %u\n", __func__,
fifo);
return -1;
}
if (debug)
debug("%s: used %u of FIFOs\n", __func__, fifo);
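/*
* Summary of the PTGF size codes assigned above (derived from the
* in-line comments; added here for reference, not part of the original
* source): 4 = one 10 KiB FIFO, 3 = two 5 KiB FIFOs, 1 = one 5 KiB plus
* two 2.5 KiB FIFOs, 0 = four 2.5 KiB FIFOs per group.
*/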
/* Now configure all FIFO groups */
for (fifo = 0; fifo < fifo_groups; fifo++) {
cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;
pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo));
if (pko_ptgfx_cfg.s.size != fifo_group_cfg[fifo])
pko_ptgfx_cfg.s.reset = 1;
pko_ptgfx_cfg.s.size = fifo_group_cfg[fifo];
if (fifo_group_spd[fifo] >= 40)
if (pko_ptgfx_cfg.s.size >= 3)
pko_ptgfx_cfg.s.rate = 3; /* 50 Gbps */
else
pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
else if (fifo_group_spd[fifo] >= 20)
pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
else if (fifo_group_spd[fifo] >= 10)
pko_ptgfx_cfg.s.rate = 1; /* 12.5 Gbps */
else
pko_ptgfx_cfg.s.rate = 0; /* 6.25 Gbps */
if (debug)
debug("%s: FIFO %#x-%#x size=%u speed=%d rate=%d\n",
__func__, fifo * 4, fifo * 4 + 3,
pko_ptgfx_cfg.s.size, fifo_group_spd[fifo],
pko_ptgfx_cfg.s.rate);
csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
pko_ptgfx_cfg.s.reset = 0;
csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
}
/* Configure all MACs assigned FIFO number */
for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
cvmx_pko_macx_cfg_t pko_mac_cfg;
if (debug)
debug("%s: mac#%02u: fifo=%#x cnt=%u speed=%d\n",
__func__, mac_num,
cvmx_pko3_mac_table[mac_num].fifo_id,
cvmx_pko3_mac_table[mac_num].fifo_cnt,
cvmx_pko3_mac_table[mac_num].spd);
pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
pko_mac_cfg.s.fifo_num = cvmx_pko3_mac_table[mac_num].fifo_id;
csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
}
/* Setup PKO MCI0/MCI1/SKID credits */
for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
cvmx_pko_mci0_max_credx_t pko_mci0_max_cred;
cvmx_pko_mci1_max_credx_t pko_mci1_max_cred;
cvmx_pko_macx_cfg_t pko_mac_cfg;
unsigned int fifo_credit, mac_credit, skid_credit;
unsigned int pko_fifo_cnt, fifo_size;
unsigned int mac_fifo_cnt;
unsigned int tmp;
int saved_fifo_num;
pko_fifo_cnt = cvmx_pko3_mac_table[mac_num].fifo_cnt;
mac_fifo_cnt = cvmx_pko3_mac_table[mac_num].mac_fifo_cnt;
/* Skip unused MACs */
if (pko_fifo_cnt == 0)
continue;
/* Check for sanity */
if (pko_fifo_cnt > 4)
pko_fifo_cnt = 1;
fifo_size = (2 * 1024) + (1024 / 2); /* 2.5KiB */
fifo_credit = pko_fifo_cnt * fifo_size;
if (mac_num == 0) {
/* loopback */
mac_credit = 4096; /* From HRM Sec 13.0 */
skid_credit = 0;
} else if (mac_num == 1) {
/* DPI */
mac_credit = 2 * 1024;
skid_credit = 0;
} else if (octeon_has_feature(OCTEON_FEATURE_ILK) &&
(mac_num & 0xfe) == 2) {
/* ILK0, ILK1: MAC 2,3 */
mac_credit = 4 * 1024; /* 4KB fifo */
skid_credit = 0;
} else if (octeon_has_feature(OCTEON_FEATURE_SRIO) &&
(mac_num >= 6) && (mac_num <= 9)) {
/* SRIO0, SRIO1: MAC 6..9 */
mac_credit = 1024 / 2;
skid_credit = 0;
} else {
/* BGX */
mac_credit = mac_fifo_cnt * 8 * 1024;
skid_credit = mac_fifo_cnt * 256;
}
if (debug)
debug("%s: mac %u pko_fifo_credit=%u mac_credit=%u\n",
__func__, mac_num, fifo_credit, mac_credit);
tmp = (fifo_credit + mac_credit) / 16;
pko_mci0_max_cred.u64 = 0;
pko_mci0_max_cred.s.max_cred_lim = tmp;
/* Check for overflow */
if (pko_mci0_max_cred.s.max_cred_lim != tmp) {
cvmx_printf("WARNING: %s: MCI0 credit overflow\n",
__func__);
pko_mci0_max_cred.s.max_cred_lim = 0xfff;
}
/* Pass 2 PKO hardware does not use the MCI0 credits */
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
csr_wr_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num),
pko_mci0_max_cred.u64);
/* The original CSR formula is the correct one after all */
tmp = (mac_credit) / 16;
pko_mci1_max_cred.u64 = 0;
pko_mci1_max_cred.s.max_cred_lim = tmp;
/* Check for overflow */
if (pko_mci1_max_cred.s.max_cred_lim != tmp) {
cvmx_printf("WARNING: %s: MCI1 credit overflow\n",
__func__);
pko_mci1_max_cred.s.max_cred_lim = 0xfff;
}
csr_wr_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num),
pko_mci1_max_cred.u64);
tmp = (skid_credit / 256) >> 1; /* valid 0,1,2 */
pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
/* The PKO_MACX_CFG bits cannot be changed unless FIFO_NUM=0x1f (unused fifo) */
saved_fifo_num = pko_mac_cfg.s.fifo_num;
pko_mac_cfg.s.fifo_num = 0x1f;
pko_mac_cfg.s.skid_max_cnt = tmp;
csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
pko_mac_cfg.s.fifo_num = saved_fifo_num;
csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
if (debug) {
pko_mci0_max_cred.u64 =
csr_rd_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num));
pko_mci1_max_cred.u64 =
csr_rd_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num));
pko_mac_cfg.u64 =
csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
debug("%s: mac %u PKO_MCI0_MAX_CREDX=%u PKO_MCI1_MAX_CREDX=%u PKO_MACX_CFG[SKID_MAX_CNT]=%u\n",
__func__, mac_num,
pko_mci0_max_cred.s.max_cred_lim,
pko_mci1_max_cred.s.max_cred_lim,
pko_mac_cfg.s.skid_max_cnt);
}
} /* for mac_num */
return 0;
}
/** Set MAC options
*
* The options supported are the parameters below:
*
* @param xiface The physical interface number
* @param index The physical sub-interface port
* @param fcs_enable Enable FCS generation
* @param pad_enable Enable padding to minimum packet size
* @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
*
* The typical use for `fcs_sop_off` is when the interface is configured
* to prepend a header such as HiGig to every Ethernet packet. Such a
* header usually does not take part in the CRC32 computation stream,
* so its size must be set with this parameter.
*
* @return Returns 0 on success, -1 if interface/port is invalid.
*/
int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
bool pad_enable, unsigned int fcs_sop_off)
{
int mac_num;
cvmx_pko_macx_cfg_t pko_mac_cfg;
unsigned int fifo_num;
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
if (debug)
debug("%s: intf %u:%u/%u fcs=%d pad=%d\n", __func__, xi.node,
xi.interface, index, fcs_enable, pad_enable);
mac_num = __cvmx_pko3_get_mac_num(xiface, index);
if (mac_num < 0) {
cvmx_printf("ERROR: %s: invalid interface %u:%u/%u\n", __func__,
xi.node, xi.interface, index);
return -1;
}
pko_mac_cfg.u64 = csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num));
/* If MAC is not assigned, return an error */
if (pko_mac_cfg.s.fifo_num == 0x1f) {
cvmx_printf("ERROR: %s: unused interface %u:%u/%u\n", __func__,
xi.node, xi.interface, index);
return -1;
}
if (pko_mac_cfg.s.min_pad_ena == pad_enable &&
pko_mac_cfg.s.fcs_ena == fcs_enable) {
if (debug)
debug("%s: mac %#x unchanged\n", __func__, mac_num);
return 0;
}
/* WORKAROUND: Pass1 won't allow change any bits unless FIFO_NUM=0x1f */
fifo_num = pko_mac_cfg.s.fifo_num;
pko_mac_cfg.s.fifo_num = 0x1f;
pko_mac_cfg.s.min_pad_ena = pad_enable;
pko_mac_cfg.s.fcs_ena = fcs_enable;
pko_mac_cfg.s.fcs_sop_off = fcs_sop_off;
csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
pko_mac_cfg.s.fifo_num = fifo_num;
csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
if (debug)
debug("%s: PKO_MAC[%u]CFG=%#llx\n", __func__, mac_num,
(unsigned long long)csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num)));
return 0;
}
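/*
* Illustrative usage (not part of the original source): enable FCS
* generation and minimum-size padding on port 0 of an interface, with no
* leading bytes excluded from the FCS. xiface is assumed to come from
* cvmx_helper_node_interface_to_xiface(), as used elsewhere in this file:
*
*	if (cvmx_pko3_interface_options(xiface, 0, true, true, 0))
*		debug("failed to set PKO interface options\n");
*/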
/** Set Descriptor Queue options
*
* The `min_pad` parameter must be in agreement with the interface-level
* padding option for all descriptor queues assigned to that particular
* interface/port.
*
* @param node on which to operate
* @param dq descriptor queue to set
* @param min_pad minimum padding to set for dq
*/
void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad)
{
cvmx_pko_pdm_dqx_minpad_t reg;
dq &= (1 << 10) - 1;
reg.u64 = csr_rd_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq));
reg.s.minpad = min_pad;
csr_wr_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq), reg.u64);
}
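/*
* Illustrative usage (not part of the original source): when padding was
* enabled on the interface via cvmx_pko3_interface_options(), every
* descriptor queue feeding that interface should agree:
*
*	cvmx_pko3_dq_options(node, dq, true);
*/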

View file

@ -0,0 +1,292 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <mach/cvmx-regs.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn63xx[] = {
{ "prbs_err_cnt", 299, 252 }, // prbs_err_cnt[47..0]
{ "prbs_lock", 251, 251 }, // prbs_lock
{ "jtg_prbs_rst_n", 250, 250 }, // jtg_prbs_rst_n
{ "jtg_run_prbs31", 249, 249 }, // jtg_run_prbs31
{ "jtg_run_prbs7", 248, 248 }, // jtg_run_prbs7
{ "Unused1", 247, 245 }, // 0
{ "cfg_pwrup_set", 244, 244 }, // cfg_pwrup_set
{ "cfg_pwrup_clr", 243, 243 }, // cfg_pwrup_clr
{ "cfg_rst_n_set", 242, 242 }, // cfg_rst_n_set
{ "cfg_rst_n_clr", 241, 241 }, // cfg_rst_n_clr
{ "cfg_tx_idle_set", 240, 240 }, // cfg_tx_idle_set
{ "cfg_tx_idle_clr", 239, 239 }, // cfg_tx_idle_clr
{ "cfg_tx_byp", 238, 238 }, // cfg_tx_byp
{ "cfg_tx_byp_inv", 237, 237 }, // cfg_tx_byp_inv
{ "cfg_tx_byp_val", 236, 227 }, // cfg_tx_byp_val[9..0]
{ "cfg_loopback", 226, 226 }, // cfg_loopback
{ "shlpbck", 225, 224 }, // shlpbck[1..0]
{ "sl_enable", 223, 223 }, // sl_enable
{ "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
{ "trimen", 221, 220 }, // trimen[1..0]
{ "serdes_tx_byp", 219, 219 }, // serdes_tx_byp
{ "serdes_pll_byp", 218, 218 }, // serdes_pll_byp
{ "lowf_byp", 217, 217 }, // lowf_byp
{ "spdsel_byp", 216, 216 }, // spdsel_byp
{ "div4_byp", 215, 215 }, // div4_byp
{ "clkf_byp", 214, 208 }, // clkf_byp[6..0]
{ "Unused2", 207, 206 }, // 0
{ "biasdrv_hs_ls_byp", 205, 201 }, // biasdrv_hs_ls_byp[4..0]
{ "tcoeff_hf_ls_byp", 200, 197 }, // tcoeff_hf_ls_byp[3..0]
{ "biasdrv_hf_byp", 196, 192 }, // biasdrv_hf_byp[4..0]
{ "tcoeff_hf_byp", 191, 188 }, // tcoeff_hf_byp[3..0]
{ "Unused3", 187, 186 }, // 0
{ "biasdrv_lf_ls_byp", 185, 181 }, // biasdrv_lf_ls_byp[4..0]
{ "tcoeff_lf_ls_byp", 180, 177 }, // tcoeff_lf_ls_byp[3..0]
{ "biasdrv_lf_byp", 176, 172 }, // biasdrv_lf_byp[4..0]
{ "tcoeff_lf_byp", 171, 168 }, // tcoeff_lf_byp[3..0]
{ "Unused4", 167, 167 }, // 0
{ "interpbw", 166, 162 }, // interpbw[4..0]
{ "pll_cpb", 161, 159 }, // pll_cpb[2..0]
{ "pll_cps", 158, 156 }, // pll_cps[2..0]
{ "pll_diffamp", 155, 152 }, // pll_diffamp[3..0]
{ "Unused5", 151, 150 }, // 0
{ "cfg_rx_idle_set", 149, 149 }, // cfg_rx_idle_set
{ "cfg_rx_idle_clr", 148, 148 }, // cfg_rx_idle_clr
{ "cfg_rx_idle_thr", 147, 144 }, // cfg_rx_idle_thr[3..0]
{ "cfg_com_thr", 143, 140 }, // cfg_com_thr[3..0]
{ "cfg_rx_offset", 139, 136 }, // cfg_rx_offset[3..0]
{ "cfg_skp_max", 135, 132 }, // cfg_skp_max[3..0]
{ "cfg_skp_min", 131, 128 }, // cfg_skp_min[3..0]
{ "cfg_fast_pwrup", 127, 127 }, // cfg_fast_pwrup
{ "Unused6", 126, 100 }, // 0
{ "detected_n", 99, 99 }, // detected_n
{ "detected_p", 98, 98 }, // detected_p
{ "dbg_res_rx", 97, 94 }, // dbg_res_rx[3..0]
{ "dbg_res_tx", 93, 90 }, // dbg_res_tx[3..0]
{ "cfg_tx_pol_set", 89, 89 }, // cfg_tx_pol_set
{ "cfg_tx_pol_clr", 88, 88 }, // cfg_tx_pol_clr
{ "cfg_rx_pol_set", 87, 87 }, // cfg_rx_pol_set
{ "cfg_rx_pol_clr", 86, 86 }, // cfg_rx_pol_clr
{ "cfg_rxd_set", 85, 85 }, // cfg_rxd_set
{ "cfg_rxd_clr", 84, 84 }, // cfg_rxd_clr
{ "cfg_rxd_wait", 83, 80 }, // cfg_rxd_wait[3..0]
{ "cfg_cdr_limit", 79, 79 }, // cfg_cdr_limit
{ "cfg_cdr_rotate", 78, 78 }, // cfg_cdr_rotate
{ "cfg_cdr_bw_ctl", 77, 76 }, // cfg_cdr_bw_ctl[1..0]
{ "cfg_cdr_trunc", 75, 74 }, // cfg_cdr_trunc[1..0]
{ "cfg_cdr_rqoffs", 73, 64 }, // cfg_cdr_rqoffs[9..0]
{ "cfg_cdr_inc2", 63, 58 }, // cfg_cdr_inc2[5..0]
{ "cfg_cdr_inc1", 57, 52 }, // cfg_cdr_inc1[5..0]
{ "fusopt_voter_sync", 51, 51 }, // fusopt_voter_sync
{ "rndt", 50, 50 }, // rndt
{ "hcya", 49, 49 }, // hcya
{ "hyst", 48, 48 }, // hyst
{ "idle_dac", 47, 45 }, // idle_dac[2..0]
{ "bg_ref_sel", 44, 44 }, // bg_ref_sel
{ "ic50dac", 43, 39 }, // ic50dac[4..0]
{ "ir50dac", 38, 34 }, // ir50dac[4..0]
{ "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
{ "tx_rout_comp_value", 32, 29 }, // tx_rout_comp_value[3..0]
{ "tx_res_offset", 28, 25 }, // tx_res_offset[3..0]
{ "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
{ "rx_rout_comp_value", 23, 20 }, // rx_rout_comp_value[3..0]
{ "rx_res_offset", 19, 16 }, // rx_res_offset[3..0]
{ "rx_cap_gen2", 15, 12 }, // rx_cap_gen2[3..0]
{ "rx_eq_gen2", 11, 8 }, // rx_eq_gen2[3..0]
{ "rx_cap_gen1", 7, 4 }, // rx_cap_gen1[3..0]
{ "rx_eq_gen1", 3, 0 }, // rx_eq_gen1[3..0]
{ NULL, -1, -1 }
};
const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn66xx[] = {
{ "prbs_err_cnt", 303, 256 }, // prbs_err_cnt[47..0]
{ "prbs_lock", 255, 255 }, // prbs_lock
{ "jtg_prbs_rx_rst_n", 254, 254 }, // jtg_prbs_rx_rst_n
{ "jtg_prbs_tx_rst_n", 253, 253 }, // jtg_prbs_tx_rst_n
{ "jtg_prbs_mode", 252, 251 }, // jtg_prbs_mode[252:251]
{ "jtg_prbs_rst_n", 250, 250 }, // jtg_prbs_rst_n
{ "jtg_run_prbs31", 249,
249 }, // jtg_run_prbs31 - Use jtg_prbs_mode instead
{ "jtg_run_prbs7", 248,
248 }, // jtg_run_prbs7 - Use jtg_prbs_mode instead
{ "Unused1", 247, 246 }, // 0
{ "div5_byp", 245, 245 }, // div5_byp
{ "cfg_pwrup_set", 244, 244 }, // cfg_pwrup_set
{ "cfg_pwrup_clr", 243, 243 }, // cfg_pwrup_clr
{ "cfg_rst_n_set", 242, 242 }, // cfg_rst_n_set
{ "cfg_rst_n_clr", 241, 241 }, // cfg_rst_n_clr
{ "cfg_tx_idle_set", 240, 240 }, // cfg_tx_idle_set
{ "cfg_tx_idle_clr", 239, 239 }, // cfg_tx_idle_clr
{ "cfg_tx_byp", 238, 238 }, // cfg_tx_byp
{ "cfg_tx_byp_inv", 237, 237 }, // cfg_tx_byp_inv
{ "cfg_tx_byp_val", 236, 227 }, // cfg_tx_byp_val[9..0]
{ "cfg_loopback", 226, 226 }, // cfg_loopback
{ "shlpbck", 225, 224 }, // shlpbck[1..0]
{ "sl_enable", 223, 223 }, // sl_enable
{ "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
{ "trimen", 221, 220 }, // trimen[1..0]
{ "serdes_tx_byp", 219, 219 }, // serdes_tx_byp
{ "serdes_pll_byp", 218, 218 }, // serdes_pll_byp
{ "lowf_byp", 217, 217 }, // lowf_byp
{ "spdsel_byp", 216, 216 }, // spdsel_byp
{ "div4_byp", 215, 215 }, // div4_byp
{ "clkf_byp", 214, 208 }, // clkf_byp[6..0]
{ "biasdrv_hs_ls_byp", 207, 203 }, // biasdrv_hs_ls_byp[4..0]
{ "tcoeff_hf_ls_byp", 202, 198 }, // tcoeff_hf_ls_byp[4..0]
{ "biasdrv_hf_byp", 197, 193 }, // biasdrv_hf_byp[4..0]
{ "tcoeff_hf_byp", 192, 188 }, // tcoeff_hf_byp[4..0]
{ "biasdrv_lf_ls_byp", 187, 183 }, // biasdrv_lf_ls_byp[4..0]
{ "tcoeff_lf_ls_byp", 182, 178 }, // tcoeff_lf_ls_byp[4..0]
{ "biasdrv_lf_byp", 177, 173 }, // biasdrv_lf_byp[4..0]
{ "tcoeff_lf_byp", 172, 168 }, // tcoeff_lf_byp[4..0]
{ "Unused4", 167, 167 }, // 0
{ "interpbw", 166, 162 }, // interpbw[4..0]
{ "pll_cpb", 161, 159 }, // pll_cpb[2..0]
{ "pll_cps", 158, 156 }, // pll_cps[2..0]
{ "pll_diffamp", 155, 152 }, // pll_diffamp[3..0]
{ "cfg_err_thr", 151, 150 }, // cfg_err_thr
{ "cfg_rx_idle_set", 149, 149 }, // cfg_rx_idle_set
{ "cfg_rx_idle_clr", 148, 148 }, // cfg_rx_idle_clr
{ "cfg_rx_idle_thr", 147, 144 }, // cfg_rx_idle_thr[3..0]
{ "cfg_com_thr", 143, 140 }, // cfg_com_thr[3..0]
{ "cfg_rx_offset", 139, 136 }, // cfg_rx_offset[3..0]
{ "cfg_skp_max", 135, 132 }, // cfg_skp_max[3..0]
{ "cfg_skp_min", 131, 128 }, // cfg_skp_min[3..0]
{ "cfg_fast_pwrup", 127, 127 }, // cfg_fast_pwrup
{ "Unused6", 126, 101 }, // 0
{ "cfg_indep_dis", 100, 100 }, // cfg_indep_dis
{ "detected_n", 99, 99 }, // detected_n
{ "detected_p", 98, 98 }, // detected_p
{ "dbg_res_rx", 97, 94 }, // dbg_res_rx[3..0]
{ "dbg_res_tx", 93, 90 }, // dbg_res_tx[3..0]
{ "cfg_tx_pol_set", 89, 89 }, // cfg_tx_pol_set
{ "cfg_tx_pol_clr", 88, 88 }, // cfg_tx_pol_clr
{ "cfg_rx_pol_set", 87, 87 }, // cfg_rx_pol_set
{ "cfg_rx_pol_clr", 86, 86 }, // cfg_rx_pol_clr
{ "cfg_rxd_set", 85, 85 }, // cfg_rxd_set
{ "cfg_rxd_clr", 84, 84 }, // cfg_rxd_clr
{ "cfg_rxd_wait", 83, 80 }, // cfg_rxd_wait[3..0]
{ "cfg_cdr_limit", 79, 79 }, // cfg_cdr_limit
{ "cfg_cdr_rotate", 78, 78 }, // cfg_cdr_rotate
{ "cfg_cdr_bw_ctl", 77, 76 }, // cfg_cdr_bw_ctl[1..0]
{ "cfg_cdr_trunc", 75, 74 }, // cfg_cdr_trunc[1..0]
{ "cfg_cdr_rqoffs", 73, 64 }, // cfg_cdr_rqoffs[9..0]
{ "cfg_cdr_inc2", 63, 58 }, // cfg_cdr_inc2[5..0]
{ "cfg_cdr_inc1", 57, 52 }, // cfg_cdr_inc1[5..0]
{ "fusopt_voter_sync", 51, 51 }, // fusopt_voter_sync
{ "rndt", 50, 50 }, // rndt
{ "hcya", 49, 49 }, // hcya
{ "hyst", 48, 48 }, // hyst
{ "idle_dac", 47, 45 }, // idle_dac[2..0]
{ "bg_ref_sel", 44, 44 }, // bg_ref_sel
{ "ic50dac", 43, 39 }, // ic50dac[4..0]
{ "ir50dac", 38, 34 }, // ir50dac[4..0]
{ "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
{ "tx_rout_comp_value", 32, 29 }, // tx_rout_comp_value[3..0]
{ "tx_res_offset", 28, 25 }, // tx_res_offset[3..0]
{ "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
{ "rx_rout_comp_value", 23, 20 }, // rx_rout_comp_value[3..0]
{ "rx_res_offset", 19, 16 }, // rx_res_offset[3..0]
{ "rx_cap_gen2", 15, 12 }, // rx_cap_gen2[3..0]
{ "rx_eq_gen2", 11, 8 }, // rx_eq_gen2[3..0]
{ "rx_cap_gen1", 7, 4 }, // rx_cap_gen1[3..0]
{ "rx_eq_gen1", 3, 0 }, // rx_eq_gen1[3..0]
{ NULL, -1, -1 }
};
const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn68xx[] = {
{ "prbs_err_cnt", 303, 256 }, // prbs_err_cnt[47..0]
{ "prbs_lock", 255, 255 }, // prbs_lock
{ "jtg_prbs_rx_rst_n", 254, 254 }, // jtg_prbs_rx_rst_n
{ "jtg_prbs_tx_rst_n", 253, 253 }, // jtg_prbs_tx_rst_n
{ "jtg_prbs_mode", 252, 251 }, // jtg_prbs_mode[252:251]
{ "jtg_prbs_rst_n", 250, 250 }, // jtg_prbs_rst_n
{ "jtg_run_prbs31", 249,
249 }, // jtg_run_prbs31 - Use jtg_prbs_mode instead
{ "jtg_run_prbs7", 248,
248 }, // jtg_run_prbs7 - Use jtg_prbs_mode instead
{ "Unused1", 247, 245 }, // 0
{ "cfg_pwrup_set", 244, 244 }, // cfg_pwrup_set
{ "cfg_pwrup_clr", 243, 243 }, // cfg_pwrup_clr
{ "cfg_rst_n_set", 242, 242 }, // cfg_rst_n_set
{ "cfg_rst_n_clr", 241, 241 }, // cfg_rst_n_clr
{ "cfg_tx_idle_set", 240, 240 }, // cfg_tx_idle_set
{ "cfg_tx_idle_clr", 239, 239 }, // cfg_tx_idle_clr
{ "cfg_tx_byp", 238, 238 }, // cfg_tx_byp
{ "cfg_tx_byp_inv", 237, 237 }, // cfg_tx_byp_inv
{ "cfg_tx_byp_val", 236, 227 }, // cfg_tx_byp_val[9..0]
{ "cfg_loopback", 226, 226 }, // cfg_loopback
{ "shlpbck", 225, 224 }, // shlpbck[1..0]
{ "sl_enable", 223, 223 }, // sl_enable
{ "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
{ "trimen", 221, 220 }, // trimen[1..0]
{ "serdes_tx_byp", 219, 219 }, // serdes_tx_byp
{ "serdes_pll_byp", 218, 218 }, // serdes_pll_byp
{ "lowf_byp", 217, 217 }, // lowf_byp
{ "spdsel_byp", 216, 216 }, // spdsel_byp
{ "div4_byp", 215, 215 }, // div4_byp
{ "clkf_byp", 214, 208 }, // clkf_byp[6..0]
{ "biasdrv_hs_ls_byp", 207, 203 }, // biasdrv_hs_ls_byp[4..0]
{ "tcoeff_hf_ls_byp", 202, 198 }, // tcoeff_hf_ls_byp[4..0]
{ "biasdrv_hf_byp", 197, 193 }, // biasdrv_hf_byp[4..0]
{ "tcoeff_hf_byp", 192, 188 }, // tcoeff_hf_byp[4..0]
{ "biasdrv_lf_ls_byp", 187, 183 }, // biasdrv_lf_ls_byp[4..0]
{ "tcoeff_lf_ls_byp", 182, 178 }, // tcoeff_lf_ls_byp[4..0]
{ "biasdrv_lf_byp", 177, 173 }, // biasdrv_lf_byp[4..0]
{ "tcoeff_lf_byp", 172, 168 }, // tcoeff_lf_byp[4..0]
{ "Unused4", 167, 167 }, // 0
{ "interpbw", 166, 162 }, // interpbw[4..0]
{ "pll_cpb", 161, 159 }, // pll_cpb[2..0]
{ "pll_cps", 158, 156 }, // pll_cps[2..0]
{ "pll_diffamp", 155, 152 }, // pll_diffamp[3..0]
{ "cfg_err_thr", 151, 150 }, // cfg_err_thr
{ "cfg_rx_idle_set", 149, 149 }, // cfg_rx_idle_set
{ "cfg_rx_idle_clr", 148, 148 }, // cfg_rx_idle_clr
{ "cfg_rx_idle_thr", 147, 144 }, // cfg_rx_idle_thr[3..0]
{ "cfg_com_thr", 143, 140 }, // cfg_com_thr[3..0]
{ "cfg_rx_offset", 139, 136 }, // cfg_rx_offset[3..0]
{ "cfg_skp_max", 135, 132 }, // cfg_skp_max[3..0]
{ "cfg_skp_min", 131, 128 }, // cfg_skp_min[3..0]
{ "cfg_fast_pwrup", 127, 127 }, // cfg_fast_pwrup
{ "Unused6", 126, 100 }, // 0
{ "detected_n", 99, 99 }, // detected_n
{ "detected_p", 98, 98 }, // detected_p
{ "dbg_res_rx", 97, 94 }, // dbg_res_rx[3..0]
{ "dbg_res_tx", 93, 90 }, // dbg_res_tx[3..0]
{ "cfg_tx_pol_set", 89, 89 }, // cfg_tx_pol_set
{ "cfg_tx_pol_clr", 88, 88 }, // cfg_tx_pol_clr
{ "cfg_rx_pol_set", 87, 87 }, // cfg_rx_pol_set
{ "cfg_rx_pol_clr", 86, 86 }, // cfg_rx_pol_clr
{ "cfg_rxd_set", 85, 85 }, // cfg_rxd_set
{ "cfg_rxd_clr", 84, 84 }, // cfg_rxd_clr
{ "cfg_rxd_wait", 83, 80 }, // cfg_rxd_wait[3..0]
{ "cfg_cdr_limit", 79, 79 }, // cfg_cdr_limit
{ "cfg_cdr_rotate", 78, 78 }, // cfg_cdr_rotate
{ "cfg_cdr_bw_ctl", 77, 76 }, // cfg_cdr_bw_ctl[1..0]
{ "cfg_cdr_trunc", 75, 74 }, // cfg_cdr_trunc[1..0]
{ "cfg_cdr_rqoffs", 73, 64 }, // cfg_cdr_rqoffs[9..0]
{ "cfg_cdr_inc2", 63, 58 }, // cfg_cdr_inc2[5..0]
{ "cfg_cdr_inc1", 57, 52 }, // cfg_cdr_inc1[5..0]
{ "fusopt_voter_sync", 51, 51 }, // fusopt_voter_sync
{ "rndt", 50, 50 }, // rndt
{ "hcya", 49, 49 }, // hcya
{ "hyst", 48, 48 }, // hyst
{ "idle_dac", 47, 45 }, // idle_dac[2..0]
{ "bg_ref_sel", 44, 44 }, // bg_ref_sel
{ "ic50dac", 43, 39 }, // ic50dac[4..0]
{ "ir50dac", 38, 34 }, // ir50dac[4..0]
{ "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
{ "tx_rout_comp_value", 32, 29 }, // tx_rout_comp_value[3..0]
{ "tx_res_offset", 28, 25 }, // tx_res_offset[3..0]
{ "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
{ "rx_rout_comp_value", 23, 20 }, // rx_rout_comp_value[3..0]
{ "rx_res_offset", 19, 16 }, // rx_res_offset[3..0]
{ "rx_cap_gen2", 15, 12 }, // rx_cap_gen2[3..0]
{ "rx_eq_gen2", 11, 8 }, // rx_eq_gen2[3..0]
{ "rx_cap_gen1", 7, 4 }, // rx_cap_gen1[3..0]
{ "rx_eq_gen1", 3, 0 }, // rx_eq_gen1[3..0]
{ NULL, -1, -1 }
};

View file

@ -0,0 +1,259 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <log.h>
#include <time.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-range.h>
#define CVMX_RANGE_AVAILABLE ((u64)-88)
#define addr_of_element(base, index) \
(1ull << 63 | ((base) + sizeof(u64) + (index) * sizeof(u64)))
#define addr_of_size(base) (1ull << 63 | (base))
static const int debug;
int cvmx_range_memory_size(int nelements)
{
return sizeof(u64) * (nelements + 1);
}
int cvmx_range_init(u64 range_addr, int size)
{
u64 lsize = size;
u64 i;
cvmx_write64_uint64(addr_of_size(range_addr), lsize);
for (i = 0; i < lsize; i++) {
cvmx_write64_uint64(addr_of_element(range_addr, i),
CVMX_RANGE_AVAILABLE);
}
return 0;
}
static int64_t cvmx_range_find_next_available(u64 range_addr, u64 index,
int align)
{
u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
u64 i;
while ((index % align) != 0)
index++;
for (i = index; i < size; i += align) {
u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
if (debug)
debug("%s: index=%d owner=%llx\n", __func__, (int)i,
(unsigned long long)r_owner);
if (r_owner == CVMX_RANGE_AVAILABLE)
return i;
}
return -1;
}
static int64_t cvmx_range_find_last_available(u64 range_addr, u64 index,
u64 align)
{
u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
u64 i;
if (index == 0)
index = size - 1;
while ((index % align) != 0)
index++;
for (i = index; i > align; i -= align) {
u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
if (debug)
debug("%s: index=%d owner=%llx\n", __func__, (int)i,
(unsigned long long)r_owner);
if (r_owner == CVMX_RANGE_AVAILABLE)
return i;
}
return -1;
}
int cvmx_range_alloc_ordered(u64 range_addr, u64 owner, u64 cnt,
int align, int reverse)
{
u64 i = 0, size;
s64 first_available;
if (debug)
debug("%s: range_addr=%llx owner=%llx cnt=%d\n", __func__,
(unsigned long long)range_addr,
(unsigned long long)owner, (int)cnt);
size = cvmx_read64_uint64(addr_of_size(range_addr));
while (i < size) {
u64 available_cnt = 0;
if (reverse)
first_available = cvmx_range_find_last_available(range_addr, i, align);
else
first_available = cvmx_range_find_next_available(range_addr, i, align);
if (first_available == -1)
return -1;
i = first_available;
if (debug)
debug("%s: first_available=%d\n", __func__, (int)first_available);
while ((available_cnt != cnt) && (i < size)) {
u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
if (r_owner == CVMX_RANGE_AVAILABLE)
available_cnt++;
i++;
}
if (available_cnt == cnt) {
u64 j;
if (debug)
debug("%s: first_available=%d available=%d\n",
__func__,
(int)first_available, (int)available_cnt);
for (j = first_available; j < first_available + cnt;
j++) {
u64 a = addr_of_element(range_addr, j);
cvmx_write64_uint64(a, owner);
}
return first_available;
}
}
if (debug) {
debug("ERROR: %s: failed to allocate range cnt=%d\n",
__func__, (int)cnt);
cvmx_range_show(range_addr);
}
return -1;
}
int cvmx_range_alloc(u64 range_addr, u64 owner, u64 cnt, int align)
{
return cvmx_range_alloc_ordered(range_addr, owner, cnt, align, 0);
}
int cvmx_range_reserve(u64 range_addr, u64 owner, u64 base,
u64 cnt)
{
u64 i, size, r_owner;
u64 up = base + cnt;
size = cvmx_read64_uint64(addr_of_size(range_addr));
if (up > size) {
debug("ERROR: %s: invalid base or cnt. range_addr=0x%llx, owner=0x%llx, size=%d base+cnt=%d\n",
__func__, (unsigned long long)range_addr,
(unsigned long long)owner,
(int)size, (int)up);
return -1;
}
for (i = base; i < up; i++) {
r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
if (debug)
debug("%s: %d: %llx\n",
__func__, (int)i, (unsigned long long)r_owner);
if (r_owner != CVMX_RANGE_AVAILABLE) {
if (debug) {
debug("%s: resource already reserved base+cnt=%d %llu %llu %llx %llx %llx\n",
__func__, (int)i, (unsigned long long)cnt,
(unsigned long long)base,
(unsigned long long)r_owner,
(unsigned long long)range_addr,
(unsigned long long)owner);
}
return -1;
}
}
for (i = base; i < up; i++)
cvmx_write64_uint64(addr_of_element(range_addr, i), owner);
return base;
}
int __cvmx_range_is_allocated(u64 range_addr, int bases[], int count)
{
u64 i, cnt, size;
u64 r_owner;
cnt = count;
size = cvmx_read64_uint64(addr_of_size(range_addr));
for (i = 0; i < cnt; i++) {
u64 base = bases[i];
if (base >= size) {
debug("ERROR: %s: invalid base or cnt size=%d base=%d\n",
__func__, (int)size, (int)base);
return 0;
}
r_owner = cvmx_read64_uint64(addr_of_element(range_addr, base));
if (r_owner == CVMX_RANGE_AVAILABLE) {
if (debug) {
debug("%s: i=%d:base=%d is available\n",
__func__, (int)i, (int)base);
}
return 0;
}
}
return 1;
}
int cvmx_range_free_mutiple(u64 range_addr, int bases[], int count)
{
u64 i, cnt;
cnt = count;
if (__cvmx_range_is_allocated(range_addr, bases, count) != 1)
return -1;
for (i = 0; i < cnt; i++) {
u64 base = bases[i];
cvmx_write64_uint64(addr_of_element(range_addr, base),
CVMX_RANGE_AVAILABLE);
}
return 0;
}
int cvmx_range_free_with_base(u64 range_addr, int base, int cnt)
{
u64 i, size;
u64 up = base + cnt;
size = cvmx_read64_uint64(addr_of_size(range_addr));
if (up > size) {
debug("ERROR: %s: invalid base or cnt size=%d base+cnt=%d\n",
__func__, (int)size, (int)up);
return -1;
}
for (i = base; i < up; i++) {
cvmx_write64_uint64(addr_of_element(range_addr, i),
CVMX_RANGE_AVAILABLE);
}
return 0;
}
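/*
* Illustrative usage of the range allocator above (not part of the
* original source). range_addr is assumed to point to a buffer of
* cvmx_range_memory_size(64) bytes, e.g. obtained from bootmem
* (allocation not shown), and owner_id is an arbitrary tag identifying
* the owner (any value other than CVMX_RANGE_AVAILABLE):
*
*	cvmx_range_init(range_addr, 64);
*
*	int base = cvmx_range_alloc(range_addr, owner_id, 4, 1);
*	if (base < 0)
*		debug("range allocation failed\n");
*	else
*		cvmx_range_free_with_base(range_addr, base, 4);
*/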

View file

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Functions for AGL (RGMII) common initialization and configuration.
*/
#ifndef __CVMX_AGL_H__
#define __CVMX_AGL_H__
/*
* Enable the AGL (RGMII) port.
*
* @param port port to enable
*
* @return Zero on success, negative on failure
*/
int cvmx_agl_enable(int port);
cvmx_helper_link_info_t cvmx_agl_link_get(int port);
/*
* Set MII/RGMII link based on mode.
*
* @param port interface port to set the link.
* @param link_info Link status
*
* @return 0 on success and 1 on failure
*/
int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info);
/**
* Disables the sending of flow control (pause) frames on the specified
* AGL (RGMII) port(s).
*
* @param interface Which interface (0 or 1)
* @param port_mask Mask (4 bits) of which ports on the interface to disable
* backpressure on.
* 1 => disable backpressure
* 0 => enable backpressure
*
* @return 0 on success
* -1 on error
*/
int cvmx_agl_set_backpressure_override(u32 interface, uint32_t port_mask);
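/*
* Illustrative usage (not part of the original source): to disable
* backpressure on ports 0 and 2 of interface 0 while leaving ports 1 and
* 3 enabled, the 4-bit mask is 0x5:
*
*	cvmx_agl_set_backpressure_override(0, 0x5);
*/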
#endif /* __CVMX_AGL_H__ */

View file

@ -26,7 +26,8 @@
/* Real physical addresses of memory regions */
#define OCTEON_DDR0_BASE (0x0ULL)
#define OCTEON_DDR0_SIZE (0x010000000ULL)
/* Use 16MiB here, as 256MiB leads to overwriting U-Boot reloc space */
#define OCTEON_DDR0_SIZE (0x001000000ULL)
#define OCTEON_DDR1_BASE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
? 0x20000000ULL : 0x410000000ULL)
#define OCTEON_DDR1_SIZE (0x010000000ULL)

View file

@ -0,0 +1,128 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#ifndef __CVMX_CONFIG_H__
#define __CVMX_CONFIG_H__
/************************* Config Specific Defines ************************/
#define CVMX_LLM_NUM_PORTS 1
/**< PKO queues per port for interface 0 (ports 0-15) */
#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
/**< PKO queues per port for interface 1 (ports 16-31) */
#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
/**< PKO queues per port for interface 4 (AGL) */
#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE4 1
/**< Limit on the number of PKO ports enabled for interface 0 */
#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
/**< Limit on the number of PKO ports enabled for interface 1 */
#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
/**< PKO queues per port for PCI (ports 32-35) */
#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
/**< PKO queues per port for Loop devices (ports 36-39) */
#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
/**< PKO queues per port for SRIO0 devices (ports 40-41) */
#define CVMX_PKO_QUEUES_PER_PORT_SRIO0 1
/**< PKO queues per port for SRIO1 devices (ports 42-43) */
#define CVMX_PKO_QUEUES_PER_PORT_SRIO1 1
/************************* FPA allocation *********************************/
/* Pool sizes in bytes, must be multiple of a cache line */
#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_3_SIZE (2 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_6_SIZE (8 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
/* Pools in use */
/**< Packet buffers */
#define CVMX_FPA_PACKET_POOL (0)
#ifndef CVMX_FPA_PACKET_POOL_SIZE
#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
#endif
/**< Work queue entries */
#define CVMX_FPA_WQE_POOL (1)
#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/**< PKO queue command buffers */
#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
/**< BCH queue command buffers */
#define CVMX_FPA_BCH_POOL (6)
#define CVMX_FPA_BCH_POOL_SIZE CVMX_FPA_POOL_6_SIZE
/************************* FAU allocation ********************************/
/* The fetch and add registers are allocated here. They are arranged
* in order of descending size so that all alignment constraints are
* automatically met.
* The enums are linked so that the following enum continues allocating
* where the previous one left off, so the numbering within each
* enum always starts with zero. The macros take care of the address
* increment size, so the values entered always increase by 1.
* FAU registers are accessed with byte addresses.
*/
#define CVMX_FAU_REG_64_ADDR(x) (((x) << 3) + CVMX_FAU_REG_64_START)
typedef enum {
CVMX_FAU_REG_64_START = 0,
/**< FAU registers for the position in PKO command buffers */
CVMX_FAU_REG_OQ_ADDR_INDEX = CVMX_FAU_REG_64_ADDR(0),
/* Array of 36 */
CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(36),
} cvmx_fau_reg_64_t;
#define CVMX_FAU_REG_32_ADDR(x) (((x) << 2) + CVMX_FAU_REG_32_START)
typedef enum {
CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
} cvmx_fau_reg_32_t;
#define CVMX_FAU_REG_16_ADDR(x) (((x) << 1) + CVMX_FAU_REG_16_START)
typedef enum {
CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
} cvmx_fau_reg_16_t;
#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
typedef enum {
CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
} cvmx_fau_reg_8_t;
/* The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first available
* FAU address that is not allocated in cvmx-config.h. This is 64 bit aligned.
*/
#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
#define CVMX_FAU_REG_END (2048)
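/*
* Worked example of the allocation above (added for reference, not part
* of the original source): CVMX_FAU_REG_OQ_ADDR_INDEX starts at byte
* address 0 and spans 36 64-bit registers, so CVMX_FAU_REG_64_END is
* 36 * 8 = 288. No 32-, 16- or 8-bit registers are allocated here, so
* CVMX_FAU_REG_32_END, CVMX_FAU_REG_16_END and CVMX_FAU_REG_8_END all
* equal 288 as well, and CVMX_FAU_REG_AVAIL_BASE rounds 288 up to the
* next 8-byte boundary, i.e. 288. Application code may therefore use
* byte addresses from 288 up to CVMX_FAU_REG_END (2048).
*/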
/********************** scratch memory allocation *************************/
/* Scratchpad memory allocation. Note that these are byte memory addresses.
* Some uses of scratchpad (IOBDMA for example) require the use of 8-byte
* aligned addresses, so proper alignment needs to be taken into account.
*/
/**< Pre allocation for PKO queue command buffers */
#define CVMX_SCR_OQ_BUF_PRE_ALLOC (0)
/**< Generic scratch iobdma area */
#define CVMX_SCR_SCRATCH (8)
/**< First location available after cvmx-config.h allocated region. */
#define CVMX_SCR_REG_AVAIL_BASE (16)
#endif /* __CVMX_CONFIG_H__ */

View file

@ -0,0 +1,581 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Interface to the hardware Fetch and Add Unit.
*/
#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__
extern u8 *cvmx_fau_regs_ptr;
/**
* Initializes the FAU; on devices with FAU hardware this is a no-op.
*/
int cvmx_fau_init(void);
/**
* Return the location of emulated FAU register
*/
static inline u8 *__cvmx_fau_sw_addr(int reg)
{
if (cvmx_unlikely(!cvmx_fau_regs_ptr))
cvmx_fau_init();
return (cvmx_fau_regs_ptr + reg);
}
/**
* Perform an atomic 64 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return Value of the register before the update
*/
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg64_t reg,
int64_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_fetch_and_add64(reg, value);
return __atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 32 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return Value of the register before the update
*/
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg32_t reg,
int32_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_fetch_and_add32(reg, value);
reg ^= SWIZZLE_32;
return __atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 16 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @param value Signed value to add.
* @return Value of the register before the update
*/
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg16_t reg,
int16_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_fetch_and_add16(reg, value);
reg ^= SWIZZLE_16;
return __atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 8 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* @param value Signed value to add.
* @return Value of the register before the update
*/
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_fetch_and_add8(reg, value);
reg ^= SWIZZLE_8;
return __atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST);
}
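/*
* Illustrative usage (not part of the original source): a packet counter
* kept in the first FAU byte address left free by cvmx-config.h. The
* register name is hypothetical; on parts without a hardware FAU the
* same call transparently uses the software emulation above.
*
*	#define EXAMPLE_PKT_COUNTER ((cvmx_fau_reg64_t)CVMX_FAU_REG_AVAIL_BASE)
*
*	int64_t prev = cvmx_fau_fetch_and_add64(EXAMPLE_PKT_COUNTER, 1);
*/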
/**
* Perform an atomic 64 bit add after the current tag switch
* completes
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg, int64_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_tagwait_fetch_and_add64(reg, value);
/* not implemented yet.*/
return (cvmx_fau_tagwait64_t){ 1, 0 };
}
/**
* Perform an atomic 32 bit add after the current tag switch
* completes
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg, int32_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_tagwait_fetch_and_add32(reg, value);
/* not implemented yet.*/
return (cvmx_fau_tagwait32_t){ 1, 0 };
}
/**
* Perform an atomic 16 bit add after the current tag switch
* completes
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @param value Signed value to add.
* @return If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg, int16_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_tagwait_fetch_and_add16(reg, value);
/* not implemented yet.*/
return (cvmx_fau_tagwait16_t){ 1, 0 };
}
/**
* Perform an atomic 8 bit add after the current tag switch
* completes
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* @param value Signed value to add.
* @return If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU))
return cvmx_hwfau_tagwait_fetch_and_add8(reg, value);
/* not implemented yet.*/
return (cvmx_fau_tagwait8_t){ 1, 0 };
}
/**
* Perform an async atomic 64 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return Placed in the scratch pad register
*/
static inline void
cvmx_fau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, int64_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_fetch_and_add64(scraddr, reg, value);
return;
}
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an async atomic 32 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return Placed in the scratch pad register
*/
static inline void
cvmx_fau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, int32_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_fetch_and_add32(scraddr, reg, value);
return;
}
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an async atomic 16 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @param value Signed value to add.
* @return Placed in the scratch pad register
*/
static inline void
cvmx_fau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, int16_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_fetch_and_add16(scraddr, reg, value);
return;
}
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an async atomic 8 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* @param value Signed value to add.
* @return Placed in the scratch pad register
*/
static inline void
cvmx_fau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_fetch_and_add8(scraddr, reg, value);
return;
}
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
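/*
* Illustrative usage (not part of the original source): issue an
* asynchronous add and pick the old value up from the scratchpad later.
* CVMX_SCR_SCRATCH comes from cvmx-config.h; cvmx_scratch_read64() is
* assumed to be the read counterpart of cvmx_scratch_write64() used
* above, and on hardware a CVMX_SYNCIOBDMA barrier is typically needed
* before the scratchpad read:
*
*	cvmx_fau_async_fetch_and_add64(CVMX_SCR_SCRATCH, reg, 1);
*
*	CVMX_SYNCIOBDMA;
*	int64_t old = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
*/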
/**
* Perform an async atomic 64 bit add after the current tag
* switch completes.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* If a timeout occurs, the error bit (63) will be set. Otherwise
* the value of the register before the update will be
* returned
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add64(u64 scraddr,
cvmx_fau_reg64_t reg,
int64_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_tagwait_fetch_and_add64(scraddr, reg, value);
return;
}
/* Broken. Where is the tag wait? */
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an async atomic 32 bit add after the current tag
* switch completes.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* If a timeout occurs, the error bit (63) will be set. Otherwise
* the value of the register before the update will be
* returned
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @param value Signed value to add.
* Note: Only the low 22 bits are available.
* @return Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add32(u64 scraddr,
cvmx_fau_reg32_t reg,
int32_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_tagwait_fetch_and_add32(scraddr, reg, value);
return;
}
/* Broken. Where is the tag wait? */
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an async atomic 16 bit add after the current tag
* switch completes.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* If a timeout occurs, the error bit (63) will be set. Otherwise
* the value of the register before the update will be
* returned
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @param value Signed value to add.
* @return Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add16(u64 scraddr,
cvmx_fau_reg16_t reg,
int16_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_tagwait_fetch_and_add16(scraddr, reg, value);
return;
}
/* Broken. Where is the tag wait? */
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an async atomic 8 bit add after the current tag
* switch completes.
*
* @param scraddr Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* If a timeout occurs, the error bit (63) will be set. Otherwise
* the value of the register before the update will be
* returned
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* @param value Signed value to add.
* @return Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add8(u64 scraddr,
cvmx_fau_reg8_t reg,
int8_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_async_tagwait_fetch_and_add8(scraddr, reg, value);
return;
}
/* Broken. Where is the tag wait? */
cvmx_scratch_write64(
scraddr,
__atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
value, __ATOMIC_SEQ_CST));
}
/**
* Perform an atomic 64 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @param value Signed value to add.
*/
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg64_t reg, int64_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_add64(reg, value);
return;
}
/* Ignored fetch values should be optimized away */
__atomic_add_fetch(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 32 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @param value Signed value to add.
*/
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg32_t reg, int32_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_add32(reg, value);
return;
}
reg ^= SWIZZLE_32;
/* Ignored fetch values should be optimized away */
__atomic_add_fetch(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 16 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @param value Signed value to add.
*/
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg16_t reg, int16_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_add16(reg, value);
return;
}
reg ^= SWIZZLE_16;
/* Ignored fetch values should be optimized away */
__atomic_add_fetch(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 8 bit add
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* @param value Signed value to add.
*/
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_add8(reg, value);
return;
}
reg ^= SWIZZLE_8;
/* Ignored fetch values should be optimized away */
__atomic_add_fetch(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 64 bit write
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @param value Signed value to write.
*/
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg64_t reg, int64_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_write64(reg, value);
return;
}
__atomic_store_n(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 32 bit write
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @param value Signed value to write.
*/
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg32_t reg, int32_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_write32(reg, value);
return;
}
reg ^= SWIZZLE_32;
__atomic_store_n(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 16 bit write
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @param value Signed value to write.
*/
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg16_t reg, int16_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_write16(reg, value);
return;
}
reg ^= SWIZZLE_16;
__atomic_store_n(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/**
* Perform an atomic 8 bit write
*
* @param reg FAU atomic register to access. 0 <= reg < 2048.
* @param value Signed value to write.
*/
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)
{
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
cvmx_hwfau_atomic_write8(reg, value);
return;
}
reg ^= SWIZZLE_8;
__atomic_store_n(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
__ATOMIC_SEQ_CST);
}
/** Allocates 64bit FAU register.
* @param reserve base address to reserve
* @return value is the base address of allocated FAU register
*/
int cvmx_fau64_alloc(int reserve);
/** Allocates 32bit FAU register.
* @param reserve base address to reserve
* @return value is the base address of allocated FAU register
*/
int cvmx_fau32_alloc(int reserve);
/** Allocates 16bit FAU register.
* @param reserve base address to reserve
* @return value is the base address of allocated FAU register
*/
int cvmx_fau16_alloc(int reserve);
/** Allocates 8bit FAU register.
* @param reserve base address to reserve
* @return value is the base address of allocated FAU register
*/
int cvmx_fau8_alloc(int reserve);
/** Frees the specified FAU register.
* @param address base address of register to release.
* @return 0 on success; -1 on failure
*/
int cvmx_fau_free(int address);
/** Display the fau registers array
*/
void cvmx_fau_show(void);
#endif /* __CVMX_FAU_H__ */
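A minimal usage sketch (not part of the patch) of how the FAU helpers declared above might be combined; the reserve argument of 0 and the counter semantics are assumptions for illustration only:

static inline int example_fau_counter(void)
{
	int addr = cvmx_fau64_alloc(0);		/* reserve a 64-bit FAU register */
	cvmx_fau_reg64_t reg;

	if (addr < 0)
		return -1;

	reg = (cvmx_fau_reg64_t)addr;
	cvmx_fau_atomic_write64(reg, 0);	/* clear the counter */
	cvmx_fau_atomic_add64(reg, 1);		/* count one event */

	return cvmx_fau_free(addr);		/* release the register */
}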

View file

@ -104,8 +104,9 @@ static inline void *cvmx_fpa_alloc(u64 pool)
/* FPA3 is handled differently */
if ((octeon_has_feature(OCTEON_FEATURE_FPA3))) {
return cvmx_fpa3_alloc(cvmx_fpa1_pool_to_fpa3_aura(pool));
} else
} else {
return cvmx_fpa1_alloc(pool);
}
}
/**

View file

@ -526,41 +526,4 @@ const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);
/* FIXME: Need a different macro for stage2 of u-boot */
static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,
int buffer_sz, int buf_cnt)
{
cvmx_fpa_poolx_cfg_t pool_cfg;
/* Configure pool stack */
cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr);
cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);
cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);
/* Configure pool with buffer size */
pool_cfg.u64 = 0;
pool_cfg.cn78xx.nat_align = 1;
pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
pool_cfg.cn78xx.l_type = 0x2;
pool_cfg.cn78xx.ena = 0;
cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
/* Reset pool before starting */
pool_cfg.cn78xx.ena = 1;
cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);
cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);
cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);
}
static inline void cvmx_fpa3_stage2_disable(int aura, int pool)
{
cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);
cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);
cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);
cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);
cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);
}
#endif /* __CVMX_FPA3_H__ */

View file

@ -9,6 +9,8 @@
#ifndef __CVMX_HELPER_BOARD_H__
#define __CVMX_HELPER_BOARD_H__
#include <asm-generic/gpio.h>
#define CVMX_VSC7224_NAME_LEN 16
typedef enum {
@ -185,8 +187,8 @@ struct cvmx_vsc7224 {
struct cvmx_fdt_i2c_bus_info *i2c_bus;
/** Address of VSC7224 on i2c bus */
int i2c_addr;
struct cvmx_fdt_gpio_info *los_gpio; /** LoS GPIO pin */
struct cvmx_fdt_gpio_info *reset_gpio; /** Reset GPIO pin */
struct gpio_desc los_gpio; /** LoS GPIO pin */
struct gpio_desc reset_gpio; /** Reset GPIO pin */
int of_offset; /** Offset in device tree */
};

View file

@ -14,10 +14,13 @@
#include <fdtdec.h>
#include <time.h>
#include <asm/global_data.h>
#include <asm-generic/gpio.h>
#include <dm/device.h>
#include <linux/libfdt.h>
#include <mach/cvmx-helper-sfp.h>
/* todo: this is deprecated and some of it can be removed at some time */
enum cvmx_i2c_bus_type {
CVMX_I2C_BUS_OCTEON,
CVMX_I2C_MUX_PCA9540,
@ -55,6 +58,8 @@ struct cvmx_fdt_i2c_bus_info {
u8 enable_bit;
/** True if mux, false if switch */
bool is_mux;
struct udevice *i2c_bus;
};
/**
@ -85,22 +90,24 @@ struct cvmx_fdt_sfp_info {
bool is_qsfp;
/** True if EEPROM data is valid */
bool valid;
/** SFP tx_disable GPIO descriptor */
struct cvmx_fdt_gpio_info *tx_disable;
struct gpio_desc tx_disable;
/** SFP mod_abs/QSFP mod_prs GPIO descriptor */
struct cvmx_fdt_gpio_info *mod_abs;
struct gpio_desc mod_abs;
/** SFP tx_error GPIO descriptor */
struct cvmx_fdt_gpio_info *tx_error;
struct gpio_desc tx_error;
/** SFP rx_los GPIO descriptor */
struct cvmx_fdt_gpio_info *rx_los;
struct gpio_desc rx_los;
/** QSFP select GPIO descriptor */
struct cvmx_fdt_gpio_info *select;
struct gpio_desc select;
/** QSFP reset GPIO descriptor */
struct cvmx_fdt_gpio_info *reset;
struct gpio_desc reset;
/** QSFP interrupt GPIO descriptor */
struct cvmx_fdt_gpio_info *interrupt;
struct gpio_desc interrupt;
/** QSFP lp_mode GPIO descriptor */
struct cvmx_fdt_gpio_info *lp_mode;
struct gpio_desc lp_mode;
/** Last mod_abs value */
int last_mod_abs;
/** Last rx_los value */
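The cvmx_fdt_gpio_info pointers above are being replaced with driver-model struct gpio_desc members. A short sketch (not part of the patch) of how such a descriptor might be requested and read with U-Boot's generic GPIO API; the "mod_abs" property name, the node argument and the polarity handling are assumptions:

static int example_sfp_module_present(ofnode sfp_node)
{
	struct gpio_desc mod_abs;
	int ret;

	ret = gpio_request_by_name_nodev(sfp_node, "mod_abs", 0, &mod_abs,
					 GPIOD_IS_IN);
	if (ret)
		return ret;

	/* MOD_ABS is typically pulled low when a module is present */
	return !dm_gpio_get_value(&mod_abs);
}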
@ -146,6 +153,9 @@ struct cvmx_fdt_sfp_info {
int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node, const char *prop_name, int *lenp,
int *nodes);
int cvmx_ofnode_lookup_phandles(ofnode node, const char *prop_name,
int *lenp, ofnode *nodes);
/**
* Helper to return the address property
*
@ -341,8 +351,7 @@ int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffse
* Given the parent offset of an i2c device build up a list describing the bus
* which can contain i2c muxes and switches.
*
* @param[in] fdt_addr address of device tree
* @param of_offset Offset of the parent node of a GPIO device in
* @param[in] node ofnode of the parent node of a GPIO device in
* the device tree.
*
* Return: pointer to list of i2c devices starting from the root which
@ -351,7 +360,7 @@ int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffse
*
* @see cvmx_fdt_free_i2c_bus()
*/
struct cvmx_fdt_i2c_bus_info *cvmx_fdt_get_i2c_bus(const void *fdt_addr, int of_offset);
struct cvmx_fdt_i2c_bus_info *cvmx_ofnode_get_i2c_bus(ofnode node);
/**
* Return the Octeon bus number for a bus descriptor
@ -496,15 +505,6 @@ int __cvmx_fdt_parse_vsc7224(const void *fdt_addr);
*/
int __cvmx_fdt_parse_avsp5410(const void *fdt_addr);
/**
* Parse SFP information from device tree
*
* @param[in] fdt_addr Address of flat device tree
*
* Return: pointer to sfp info or NULL if error
*/
struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset);
/**
* @INTERNAL
* Parses either a CS4343 phy or a slice of the phy from the device tree

View file

@ -17,7 +17,7 @@
* number. Users should set this pointer to a function before
* calling any cvmx-helper operations.
*/
void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
extern void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
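/*
 * Illustrative only, not part of this patch: with the extern declaration
 * above, a board file could install its own queue-priority hook before any
 * cvmx-helper operations run.  The queue count of 8 is an assumption made
 * for the sake of the example.
 */
#if 0	/* example */
static void board_pko_queue_priority(int ipd_port, u8 *priorities)
{
	int i;

	for (i = 0; i < 8; i++)
		priorities[i] = 1;	/* equal priority for all queues */
}

/* ... early in board initialization ... */
cvmx_override_pko_queue_priority = board_pko_queue_priority;
#endif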
/**
* Gets the fpa pool number of pko pool

View file

@ -127,6 +127,26 @@ enum cvmx_pko_padding {
CVMX_PKO_PADDING_60 = 1,
};
/**
* cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
* It is meant to allow customization of interfaces which do not have a PHY.
*
* @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
*
* If this function pointer is NULL then it defaults to the MAC.
*/
extern int (*cvmx_override_iface_phy_mode) (int interface, int index);
/**
* cvmx_override_ipd_port_setup(int ipd_port) is a function
* pointer. It is meant to allow customization of the IPD port/port kind
* setup before packet input/output comes online. It is called
* after cvmx-helper does the default IPD configuration, but
* before IPD is enabled. Users should set this pointer to a
* function before calling any cvmx-helper operations.
*/
extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
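/*
 * Illustrative only, not part of this patch: board code could hook the IPD
 * port setup callback declared above before calling any cvmx-helper
 * operations.  The body of the hook is a placeholder.
 */
#if 0	/* example */
static void board_ipd_port_setup(int ipd_port)
{
	/* board-specific port-kind or backpressure tweaks would go here */
}

cvmx_override_ipd_port_setup = board_ipd_port_setup;
#endif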
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Configuration and status register (CSR) type definitions for
* Octeon lbk.
*/
#ifndef __CVMX_LBK_DEFS_H__
#define __CVMX_LBK_DEFS_H__
#define CVMX_LBK_BIST_RESULT (0x0001180012000020ull)
#define CVMX_LBK_CHX_PKIND(offset) (0x0001180012000200ull + ((offset) & 63) * 8)
#define CVMX_LBK_CLK_GATE_CTL (0x0001180012000008ull)
#define CVMX_LBK_DAT_ERR_INFO (0x0001180012000050ull)
#define CVMX_LBK_ECC_CFG (0x0001180012000060ull)
#define CVMX_LBK_INT (0x0001180012000040ull)
#define CVMX_LBK_SFT_RST (0x0001180012000000ull)
/**
* cvmx_lbk_bist_result
*
* This register provides access to the internal BIST results. Each bit is the
* BIST result of an individual memory (per bit, 0 = pass and 1 = fail).
*/
union cvmx_lbk_bist_result {
u64 u64;
struct cvmx_lbk_bist_result_s {
u64 reserved_1_63 : 63;
u64 dat : 1;
} s;
struct cvmx_lbk_bist_result_s cn73xx;
struct cvmx_lbk_bist_result_s cn78xx;
struct cvmx_lbk_bist_result_s cn78xxp1;
struct cvmx_lbk_bist_result_s cnf75xx;
};
typedef union cvmx_lbk_bist_result cvmx_lbk_bist_result_t;
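/*
 * Illustrative only, not part of this patch: one way the BIST result might
 * be checked, assuming node 0 and the csr_rd_node() accessor used elsewhere
 * in this series.  A non-zero DAT bit reports a failed memory BIST.
 */
static inline int example_lbk_bist_failed(void)
{
	cvmx_lbk_bist_result_t bist;

	bist.u64 = csr_rd_node(0, CVMX_LBK_BIST_RESULT);
	return bist.s.dat != 0;
}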
/**
* cvmx_lbk_ch#_pkind
*/
union cvmx_lbk_chx_pkind {
u64 u64;
struct cvmx_lbk_chx_pkind_s {
u64 reserved_6_63 : 58;
u64 pkind : 6;
} s;
struct cvmx_lbk_chx_pkind_s cn73xx;
struct cvmx_lbk_chx_pkind_s cn78xx;
struct cvmx_lbk_chx_pkind_s cn78xxp1;
struct cvmx_lbk_chx_pkind_s cnf75xx;
};
typedef union cvmx_lbk_chx_pkind cvmx_lbk_chx_pkind_t;
/**
* cvmx_lbk_clk_gate_ctl
*
* This register is for diagnostic use only.
*
*/
union cvmx_lbk_clk_gate_ctl {
u64 u64;
struct cvmx_lbk_clk_gate_ctl_s {
u64 reserved_1_63 : 63;
u64 dis : 1;
} s;
struct cvmx_lbk_clk_gate_ctl_s cn73xx;
struct cvmx_lbk_clk_gate_ctl_s cn78xx;
struct cvmx_lbk_clk_gate_ctl_s cn78xxp1;
struct cvmx_lbk_clk_gate_ctl_s cnf75xx;
};
typedef union cvmx_lbk_clk_gate_ctl cvmx_lbk_clk_gate_ctl_t;
/**
* cvmx_lbk_dat_err_info
*/
union cvmx_lbk_dat_err_info {
u64 u64;
struct cvmx_lbk_dat_err_info_s {
u64 reserved_58_63 : 6;
u64 dbe_ecc_out : 9;
u64 dbe_synd : 9;
u64 dbe_addr : 8;
u64 reserved_26_31 : 6;
u64 sbe_ecc_out : 9;
u64 sbe_synd : 9;
u64 sbe_addr : 8;
} s;
struct cvmx_lbk_dat_err_info_s cn73xx;
struct cvmx_lbk_dat_err_info_s cn78xx;
struct cvmx_lbk_dat_err_info_s cn78xxp1;
struct cvmx_lbk_dat_err_info_s cnf75xx;
};
typedef union cvmx_lbk_dat_err_info cvmx_lbk_dat_err_info_t;
/**
* cvmx_lbk_ecc_cfg
*/
union cvmx_lbk_ecc_cfg {
u64 u64;
struct cvmx_lbk_ecc_cfg_s {
u64 reserved_3_63 : 61;
u64 dat_flip : 2;
u64 dat_cdis : 1;
} s;
struct cvmx_lbk_ecc_cfg_s cn73xx;
struct cvmx_lbk_ecc_cfg_s cn78xx;
struct cvmx_lbk_ecc_cfg_s cn78xxp1;
struct cvmx_lbk_ecc_cfg_s cnf75xx;
};
typedef union cvmx_lbk_ecc_cfg cvmx_lbk_ecc_cfg_t;
/**
* cvmx_lbk_int
*/
union cvmx_lbk_int {
u64 u64;
struct cvmx_lbk_int_s {
u64 reserved_6_63 : 58;
u64 chan_oflow : 1;
u64 chan_uflow : 1;
u64 dat_oflow : 1;
u64 dat_uflow : 1;
u64 dat_dbe : 1;
u64 dat_sbe : 1;
} s;
struct cvmx_lbk_int_s cn73xx;
struct cvmx_lbk_int_s cn78xx;
struct cvmx_lbk_int_s cn78xxp1;
struct cvmx_lbk_int_s cnf75xx;
};
typedef union cvmx_lbk_int cvmx_lbk_int_t;
/**
* cvmx_lbk_sft_rst
*/
union cvmx_lbk_sft_rst {
u64 u64;
struct cvmx_lbk_sft_rst_s {
u64 reserved_1_63 : 63;
u64 reset : 1;
} s;
struct cvmx_lbk_sft_rst_s cn73xx;
struct cvmx_lbk_sft_rst_s cn78xx;
struct cvmx_lbk_sft_rst_s cn78xxp1;
struct cvmx_lbk_sft_rst_s cnf75xx;
};
typedef union cvmx_lbk_sft_rst cvmx_lbk_sft_rst_t;
#endif

View file

@ -0,0 +1,516 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
* clause 22 and clause 45 operations.
*/
#ifndef __CVMX_MIO_H__
#define __CVMX_MIO_H__
/**
* PHY register 0 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_CONTROL 0
typedef union {
u16 u16;
struct {
u16 reset : 1;
u16 loopback : 1;
u16 speed_lsb : 1;
u16 autoneg_enable : 1;
u16 power_down : 1;
u16 isolate : 1;
u16 restart_autoneg : 1;
u16 duplex : 1;
u16 collision_test : 1;
u16 speed_msb : 1;
u16 unidirectional_enable : 1;
u16 reserved_0_4 : 5;
} s;
} cvmx_mdio_phy_reg_control_t;
/**
* PHY register 1 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_STATUS 1
typedef union {
u16 u16;
struct {
u16 capable_100base_t4 : 1;
u16 capable_100base_x_full : 1;
u16 capable_100base_x_half : 1;
u16 capable_10_full : 1;
u16 capable_10_half : 1;
u16 capable_100base_t2_full : 1;
u16 capable_100base_t2_half : 1;
u16 capable_extended_status : 1;
u16 capable_unidirectional : 1;
u16 capable_mf_preamble_suppression : 1;
u16 autoneg_complete : 1;
u16 remote_fault : 1;
u16 capable_autoneg : 1;
u16 link_status : 1;
u16 jabber_detect : 1;
u16 capable_extended_registers : 1;
} s;
} cvmx_mdio_phy_reg_status_t;
/**
* PHY register 2 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_ID1 2
typedef union {
u16 u16;
struct {
u16 oui_bits_3_18;
} s;
} cvmx_mdio_phy_reg_id1_t;
/**
* PHY register 3 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_ID2 3
typedef union {
u16 u16;
struct {
u16 oui_bits_19_24 : 6;
u16 model : 6;
u16 revision : 4;
} s;
} cvmx_mdio_phy_reg_id2_t;
/**
* PHY register 4 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
typedef union {
u16 u16;
struct {
u16 next_page : 1;
u16 reserved_14 : 1;
u16 remote_fault : 1;
u16 reserved_12 : 1;
u16 asymmetric_pause : 1;
u16 pause : 1;
u16 advert_100base_t4 : 1;
u16 advert_100base_tx_full : 1;
u16 advert_100base_tx_half : 1;
u16 advert_10base_tx_full : 1;
u16 advert_10base_tx_half : 1;
u16 selector : 5;
} s;
} cvmx_mdio_phy_reg_autoneg_adver_t;
/**
* PHY register 5 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
typedef union {
u16 u16;
struct {
u16 next_page : 1;
u16 ack : 1;
u16 remote_fault : 1;
u16 reserved_12 : 1;
u16 asymmetric_pause : 1;
u16 pause : 1;
u16 advert_100base_t4 : 1;
u16 advert_100base_tx_full : 1;
u16 advert_100base_tx_half : 1;
u16 advert_10base_tx_full : 1;
u16 advert_10base_tx_half : 1;
u16 selector : 5;
} s;
} cvmx_mdio_phy_reg_link_partner_ability_t;
/**
* PHY register 6 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
typedef union {
u16 u16;
struct {
u16 reserved_5_15 : 11;
u16 parallel_detection_fault : 1;
u16 link_partner_next_page_capable : 1;
u16 local_next_page_capable : 1;
u16 page_received : 1;
u16 link_partner_autoneg_capable : 1;
} s;
} cvmx_mdio_phy_reg_autoneg_expansion_t;
/**
* PHY register 9 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
typedef union {
u16 u16;
struct {
u16 test_mode : 3;
u16 manual_master_slave : 1;
u16 master : 1;
u16 port_type : 1;
u16 advert_1000base_t_full : 1;
u16 advert_1000base_t_half : 1;
u16 reserved_0_7 : 8;
} s;
} cvmx_mdio_phy_reg_control_1000_t;
/**
* PHY register 10 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_STATUS_1000 10
typedef union {
u16 u16;
struct {
u16 master_slave_fault : 1;
u16 is_master : 1;
u16 local_receiver_ok : 1;
u16 remote_receiver_ok : 1;
u16 remote_capable_1000base_t_full : 1;
u16 remote_capable_1000base_t_half : 1;
u16 reserved_8_9 : 2;
u16 idle_error_count : 8;
} s;
} cvmx_mdio_phy_reg_status_1000_t;
/**
* PHY register 15 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
typedef union {
u16 u16;
struct {
u16 capable_1000base_x_full : 1;
u16 capable_1000base_x_half : 1;
u16 capable_1000base_t_full : 1;
u16 capable_1000base_t_half : 1;
u16 reserved_0_11 : 12;
} s;
} cvmx_mdio_phy_reg_extended_status_t;
/**
* PHY register 13 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
typedef union {
u16 u16;
struct {
u16 function : 2;
u16 reserved_5_13 : 9;
u16 devad : 5;
} s;
} cvmx_mdio_phy_reg_mmd_control_t;
/**
* PHY register 14 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
typedef union {
u16 u16;
struct {
u16 address_data : 16;
} s;
} cvmx_mdio_phy_reg_mmd_address_data_t;
/* Operating request encodings. */
#define MDIO_CLAUSE_22_WRITE 0
#define MDIO_CLAUSE_22_READ 1
#define MDIO_CLAUSE_45_ADDRESS 0
#define MDIO_CLAUSE_45_WRITE 1
#define MDIO_CLAUSE_45_READ_INC 2
#define MDIO_CLAUSE_45_READ 3
/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
#define CVMX_MMD_DEVICE_PMA_PMD 1
#define CVMX_MMD_DEVICE_WIS 2
#define CVMX_MMD_DEVICE_PCS 3
#define CVMX_MMD_DEVICE_PHY_XS 4
#define CVMX_MMD_DEVICE_DTS_XS 5
#define CVMX_MMD_DEVICE_TC 6
#define CVMX_MMD_DEVICE_CL22_EXT 29
#define CVMX_MMD_DEVICE_VENDOR_1 30
#define CVMX_MMD_DEVICE_VENDOR_2 31
#define CVMX_MDIO_TIMEOUT 100000 /* 100 millisec */
static inline int cvmx_mdio_bus_id_to_node(int bus_id)
{
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
return (bus_id >> 2) & CVMX_NODE_MASK;
else
return 0;
}
static inline int cvmx_mdio_bus_id_to_bus(int bus_id)
{
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
return bus_id & 3;
else
return bus_id;
}
/* Helper function to put MDIO interface into clause 45 mode */
static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
{
cvmx_smix_clk_t smi_clk;
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
/* Put bus into clause 45 mode */
smi_clk.u64 = csr_rd_node(node, CVMX_SMIX_CLK(bus));
smi_clk.s.mode = 1;
smi_clk.s.preamble = 1;
csr_wr_node(node, CVMX_SMIX_CLK(bus), smi_clk.u64);
}
/* Helper function to put MDIO interface into clause 22 mode */
static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
{
cvmx_smix_clk_t smi_clk;
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
/* Put bus into clause 22 mode */
smi_clk.u64 = csr_rd_node(node, CVMX_SMIX_CLK(bus));
smi_clk.s.mode = 0;
csr_wr_node(node, CVMX_SMIX_CLK(bus), smi_clk.u64);
}
/**
* @INTERNAL
* Function to read SMIX_RD_DAT and check for timeouts. This
* code sequence is done fairly often, so put in one spot.
*
* @param bus_id SMI/MDIO bus to read
*
* @return Value of SMIX_RD_DAT. pending will be set on
* a timeout.
*/
static inline cvmx_smix_rd_dat_t __cvmx_mdio_read_rd_dat(int bus_id)
{
cvmx_smix_rd_dat_t smi_rd;
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
u64 done;
done = get_timer(0);
do {
mdelay(1);
smi_rd.u64 = csr_rd_node(node, CVMX_SMIX_RD_DAT(bus));
if (get_timer(done) > (CVMX_MDIO_TIMEOUT / 1000))
break;
} while (smi_rd.s.pending);
return smi_rd;
}
/**
* Perform an MII read. This function is used to read PHY
* registers controlling auto negotiation.
*
* @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
* support multiple busses.
* @param phy_id The MII phy id
* @param location Register location to read
*
* @return Result from the read or -1 on failure
*/
static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
{
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
cvmx_smix_cmd_t smi_cmd;
cvmx_smix_rd_dat_t smi_rd;
if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
__cvmx_mdio_set_clause22_mode(bus_id);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = location;
csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
smi_rd = __cvmx_mdio_read_rd_dat(bus_id);
if (smi_rd.s.val)
return smi_rd.s.dat;
else
return -1;
}
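/*
 * Illustrative only, not part of this patch: reading the clause 22 PHY
 * identifier registers with the helper above.  Bus and PHY address come
 * from the caller; error handling is minimal for brevity.
 */
static inline u32 example_mdio_read_phy_id(int bus_id, int phy_id)
{
	int id1 = cvmx_mdio_read(bus_id, phy_id, CVMX_MDIO_PHY_REG_ID1);
	int id2 = cvmx_mdio_read(bus_id, phy_id, CVMX_MDIO_PHY_REG_ID2);

	if (id1 < 0 || id2 < 0)
		return 0;	/* read failed */

	return ((u32)id1 << 16) | (u32)id2;
}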
/**
* Perform an MII write. This function is used to write PHY
* registers controlling auto negotiation.
*
* @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
* support multiple busses.
* @param phy_id The MII phy id
* @param location Register location to write
* @param val Value to write
*
* @return -1 on error
* 0 on success
*/
static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
{
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
cvmx_smix_cmd_t smi_cmd;
cvmx_smix_wr_dat_t smi_wr;
if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
__cvmx_mdio_set_clause22_mode(bus_id);
smi_wr.u64 = 0;
smi_wr.s.dat = val;
csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = location;
csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
cvmx_smix_wr_dat_t, pending, ==, 0,
CVMX_MDIO_TIMEOUT))
return -1;
return 0;
}
/**
* Perform an IEEE 802.3 clause 45 MII read. This function is used to read PHY
* registers controlling auto negotiation.
*
* @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
* support multiple busses.
* @param phy_id The MII phy id
* @param device MDIO Manageable Device (MMD) id
* @param location Register location to read
*
* @return Result from the read or -1 on failure
*/
static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
int location)
{
cvmx_smix_cmd_t smi_cmd;
cvmx_smix_rd_dat_t smi_rd;
cvmx_smix_wr_dat_t smi_wr;
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
return -1;
__cvmx_mdio_set_clause45_mode(bus_id);
smi_wr.u64 = 0;
smi_wr.s.dat = location;
csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
cvmx_smix_wr_dat_t, pending, ==, 0,
CVMX_MDIO_TIMEOUT)) {
debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(address)\n",
bus_id, phy_id, device, location);
return -1;
}
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
smi_rd = __cvmx_mdio_read_rd_dat(bus_id);
if (smi_rd.s.pending) {
debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(data)\n",
bus_id, phy_id, device, location);
return -1;
}
if (smi_rd.s.val)
return smi_rd.s.dat;
debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d INVALID READ\n",
bus_id, phy_id, device, location);
return -1;
}
/**
* Perform an IEEE 802.3 clause 45 MII write. This function is used to write PHY
* registers controlling auto negotiation.
*
* @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
* support multiple busses.
* @param phy_id The MII phy id
* @param device MDIO Manageable Device (MMD) id
* @param location Register location to write
* @param val Value to write
*
* @return -1 on error
* 0 on success
*/
static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
int location, int val)
{
cvmx_smix_cmd_t smi_cmd;
cvmx_smix_wr_dat_t smi_wr;
int node = cvmx_mdio_bus_id_to_node(bus_id);
int bus = cvmx_mdio_bus_id_to_bus(bus_id);
if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
return -1;
__cvmx_mdio_set_clause45_mode(bus_id);
smi_wr.u64 = 0;
smi_wr.s.dat = location;
csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
cvmx_smix_wr_dat_t, pending, ==, 0,
CVMX_MDIO_TIMEOUT))
return -1;
smi_wr.u64 = 0;
smi_wr.s.dat = val;
csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
cvmx_smix_wr_dat_t, pending, ==, 0,
CVMX_MDIO_TIMEOUT))
return -1;
return 0;
}
#endif
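A small clause 45 sketch (not part of the patch) using the helpers above; the PMA/PMD status register number (1) follows IEEE 802.3 clause 45, while the bus and PHY address are caller-supplied example values:

static inline int example_mdio45_read_pma_status(int bus_id, int phy_id)
{
	/* Register 1.1 is the PMA/PMD status 1 register */
	return cvmx_mdio_45_read(bus_id, phy_id, CVMX_MMD_DEVICE_PMA_PMD, 1);
}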

File diff suppressed because it is too large

View file

@ -0,0 +1,787 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Configuration and status register (CSR) type definitions for
* Octeon pcsxx.
*/
#ifndef __CVMX_PCSXX_DEFS_H__
#define __CVMX_PCSXX_DEFS_H__
static inline u64 CVMX_PCSXX_10GBX_STATUS_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000828ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000828ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000828ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000828ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_BIST_STATUS_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000870ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000870ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000870ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000870ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_BIT_LOCK_STATUS_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000850ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000850ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000850ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000850ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_CONTROL1_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000800ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000800ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000800ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000800ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_CONTROL2_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000818ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000818ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000818ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000818ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_INT_EN_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000860ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000860ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000860ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000860ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_INT_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000858ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000858ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000858ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000858ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_LOG_ANL_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000868ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000868ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000868ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000868ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_MISC_CTL_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000848ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000848ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000848ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000848ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_RX_SYNC_STATES_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000838ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000838ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000838ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000838ull + (offset) * 0x8000000ull;
}
#define CVMX_PCSXX_SERDES_CRDT_CNT_REG(offset) (0x00011800B0000880ull)
static inline u64 CVMX_PCSXX_SPD_ABIL_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000810ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000810ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000810ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000810ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_STATUS1_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000808ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000808ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000808ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000808ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_STATUS2_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000820ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000820ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000820ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000820ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_TX_RX_POLARITY_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000840ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000840ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000840ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000840ull + (offset) * 0x8000000ull;
}
static inline u64 CVMX_PCSXX_TX_RX_STATES_REG(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000830ull + (offset) * 0x8000000ull;
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000830ull + (offset) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x00011800B0000830ull + (offset) * 0x1000000ull;
}
return 0x00011800B0000830ull + (offset) * 0x8000000ull;
}
/**
* cvmx_pcsx#_10gbx_status_reg
*
* PCSX_10GBX_STATUS_REG = 10gbx_status_reg
*
*/
union cvmx_pcsxx_10gbx_status_reg {
u64 u64;
struct cvmx_pcsxx_10gbx_status_reg_s {
u64 reserved_13_63 : 51;
u64 alignd : 1;
u64 pattst : 1;
u64 reserved_4_10 : 7;
u64 l3sync : 1;
u64 l2sync : 1;
u64 l1sync : 1;
u64 l0sync : 1;
} s;
struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn70xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_10gbx_status_reg cvmx_pcsxx_10gbx_status_reg_t;
/**
* cvmx_pcsx#_bist_status_reg
*
* PCSX Bist Status Register
*
*/
union cvmx_pcsxx_bist_status_reg {
u64 u64;
struct cvmx_pcsxx_bist_status_reg_s {
u64 reserved_1_63 : 63;
u64 bist_status : 1;
} s;
struct cvmx_pcsxx_bist_status_reg_s cn52xx;
struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn56xx;
struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn61xx;
struct cvmx_pcsxx_bist_status_reg_s cn63xx;
struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn66xx;
struct cvmx_pcsxx_bist_status_reg_s cn68xx;
struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn70xx;
struct cvmx_pcsxx_bist_status_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_bist_status_reg cvmx_pcsxx_bist_status_reg_t;
/**
* cvmx_pcsx#_bit_lock_status_reg
*
* PCSX Bit Lock Status Register
*
*/
union cvmx_pcsxx_bit_lock_status_reg {
u64 u64;
struct cvmx_pcsxx_bit_lock_status_reg_s {
u64 reserved_4_63 : 60;
u64 bitlck3 : 1;
u64 bitlck2 : 1;
u64 bitlck1 : 1;
u64 bitlck0 : 1;
} s;
struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn70xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_bit_lock_status_reg cvmx_pcsxx_bit_lock_status_reg_t;
/**
* cvmx_pcsx#_control1_reg
*
* NOTE: Logic Analyzer is enabled with LA_EN for the specified PCS lane only.
* PKT_SZ is effective only when LA_EN=1
* For normal operation (sgmii or 1000Base-X), this bit must be 0.
* See pcsx.csr for xaui logic analyzer mode.
* For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
*
*
* PCSX regs follow IEEE Std 802.3-2005, Section: 45.2.3
*
*
* PCSX_CONTROL1_REG = Control Register1
*/
union cvmx_pcsxx_control1_reg {
u64 u64;
struct cvmx_pcsxx_control1_reg_s {
u64 reserved_16_63 : 48;
u64 reset : 1;
u64 loopbck1 : 1;
u64 spdsel1 : 1;
u64 reserved_12_12 : 1;
u64 lo_pwr : 1;
u64 reserved_7_10 : 4;
u64 spdsel0 : 1;
u64 spd : 4;
u64 reserved_0_1 : 2;
} s;
struct cvmx_pcsxx_control1_reg_s cn52xx;
struct cvmx_pcsxx_control1_reg_s cn52xxp1;
struct cvmx_pcsxx_control1_reg_s cn56xx;
struct cvmx_pcsxx_control1_reg_s cn56xxp1;
struct cvmx_pcsxx_control1_reg_s cn61xx;
struct cvmx_pcsxx_control1_reg_s cn63xx;
struct cvmx_pcsxx_control1_reg_s cn63xxp1;
struct cvmx_pcsxx_control1_reg_s cn66xx;
struct cvmx_pcsxx_control1_reg_s cn68xx;
struct cvmx_pcsxx_control1_reg_s cn68xxp1;
struct cvmx_pcsxx_control1_reg_s cn70xx;
struct cvmx_pcsxx_control1_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_control1_reg cvmx_pcsxx_control1_reg_t;
/**
* cvmx_pcsx#_control2_reg
*
* PCSX_CONTROL2_REG = Control Register2
*
*/
union cvmx_pcsxx_control2_reg {
u64 u64;
struct cvmx_pcsxx_control2_reg_s {
u64 reserved_2_63 : 62;
u64 type : 2;
} s;
struct cvmx_pcsxx_control2_reg_s cn52xx;
struct cvmx_pcsxx_control2_reg_s cn52xxp1;
struct cvmx_pcsxx_control2_reg_s cn56xx;
struct cvmx_pcsxx_control2_reg_s cn56xxp1;
struct cvmx_pcsxx_control2_reg_s cn61xx;
struct cvmx_pcsxx_control2_reg_s cn63xx;
struct cvmx_pcsxx_control2_reg_s cn63xxp1;
struct cvmx_pcsxx_control2_reg_s cn66xx;
struct cvmx_pcsxx_control2_reg_s cn68xx;
struct cvmx_pcsxx_control2_reg_s cn68xxp1;
struct cvmx_pcsxx_control2_reg_s cn70xx;
struct cvmx_pcsxx_control2_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_control2_reg cvmx_pcsxx_control2_reg_t;
/**
* cvmx_pcsx#_int_en_reg
*
* PCSX Interrupt Enable Register
*
*/
union cvmx_pcsxx_int_en_reg {
u64 u64;
struct cvmx_pcsxx_int_en_reg_s {
u64 reserved_7_63 : 57;
u64 dbg_sync_en : 1;
u64 algnlos_en : 1;
u64 synlos_en : 1;
u64 bitlckls_en : 1;
u64 rxsynbad_en : 1;
u64 rxbad_en : 1;
u64 txflt_en : 1;
} s;
struct cvmx_pcsxx_int_en_reg_cn52xx {
u64 reserved_6_63 : 58;
u64 algnlos_en : 1;
u64 synlos_en : 1;
u64 bitlckls_en : 1;
u64 rxsynbad_en : 1;
u64 rxbad_en : 1;
u64 txflt_en : 1;
} cn52xx;
struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
struct cvmx_pcsxx_int_en_reg_s cn61xx;
struct cvmx_pcsxx_int_en_reg_s cn63xx;
struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
struct cvmx_pcsxx_int_en_reg_s cn66xx;
struct cvmx_pcsxx_int_en_reg_s cn68xx;
struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
struct cvmx_pcsxx_int_en_reg_s cn70xx;
struct cvmx_pcsxx_int_en_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_int_en_reg cvmx_pcsxx_int_en_reg_t;
/**
* cvmx_pcsx#_int_reg
*
* PCSX Interrupt Register
* Note: DBG_SYNC is an edge-triggered interrupt. When set it indicates that the PCS
* synchronization state machine in the Figure 48-7 state diagram of IEEE Std 802.3-2005
* changed state from SYNC_ACQUIRED_1 to SYNC_ACQUIRED_2, indicating an invalid code group
* was received on one of the 4 receive lanes.
* This interrupt should always be disabled and used only to help debug link problems.
*/
union cvmx_pcsxx_int_reg {
u64 u64;
struct cvmx_pcsxx_int_reg_s {
u64 reserved_7_63 : 57;
u64 dbg_sync : 1;
u64 algnlos : 1;
u64 synlos : 1;
u64 bitlckls : 1;
u64 rxsynbad : 1;
u64 rxbad : 1;
u64 txflt : 1;
} s;
struct cvmx_pcsxx_int_reg_cn52xx {
u64 reserved_6_63 : 58;
u64 algnlos : 1;
u64 synlos : 1;
u64 bitlckls : 1;
u64 rxsynbad : 1;
u64 rxbad : 1;
u64 txflt : 1;
} cn52xx;
struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
struct cvmx_pcsxx_int_reg_s cn61xx;
struct cvmx_pcsxx_int_reg_s cn63xx;
struct cvmx_pcsxx_int_reg_s cn63xxp1;
struct cvmx_pcsxx_int_reg_s cn66xx;
struct cvmx_pcsxx_int_reg_s cn68xx;
struct cvmx_pcsxx_int_reg_s cn68xxp1;
struct cvmx_pcsxx_int_reg_s cn70xx;
struct cvmx_pcsxx_int_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_int_reg cvmx_pcsxx_int_reg_t;
/**
* cvmx_pcsx#_log_anl_reg
*
* PCSX Logic Analyzer Register
* NOTE: Logic Analyzer is enabled with LA_EN for xaui only. PKT_SZ is effective only when
* LA_EN=1
* For normal operation (xaui), this bit must be 0. The dropped lane is used to send rxc[3:0].
* See pcs.csr for sgmii/1000Base-X logic analyzer mode.
* For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
*/
union cvmx_pcsxx_log_anl_reg {
u64 u64;
struct cvmx_pcsxx_log_anl_reg_s {
u64 reserved_7_63 : 57;
u64 enc_mode : 1;
u64 drop_ln : 2;
u64 lafifovfl : 1;
u64 la_en : 1;
u64 pkt_sz : 2;
} s;
struct cvmx_pcsxx_log_anl_reg_s cn52xx;
struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn56xx;
struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn61xx;
struct cvmx_pcsxx_log_anl_reg_s cn63xx;
struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn66xx;
struct cvmx_pcsxx_log_anl_reg_s cn68xx;
struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn70xx;
struct cvmx_pcsxx_log_anl_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_log_anl_reg cvmx_pcsxx_log_anl_reg_t;
/**
* cvmx_pcsx#_misc_ctl_reg
*
* PCSX Misc Control Register
* LN_SWAP for XAUI is to simplify interconnection layout between devices
*/
union cvmx_pcsxx_misc_ctl_reg {
u64 u64;
struct cvmx_pcsxx_misc_ctl_reg_s {
u64 reserved_4_63 : 60;
u64 tx_swap : 1;
u64 rx_swap : 1;
u64 xaui : 1;
u64 gmxeno : 1;
} s;
struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn70xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_misc_ctl_reg cvmx_pcsxx_misc_ctl_reg_t;
/**
* cvmx_pcsx#_rx_sync_states_reg
*
* PCSX_RX_SYNC_STATES_REG = Receive Sync States Register
*
*/
union cvmx_pcsxx_rx_sync_states_reg {
u64 u64;
struct cvmx_pcsxx_rx_sync_states_reg_s {
u64 reserved_16_63 : 48;
u64 sync3st : 4;
u64 sync2st : 4;
u64 sync1st : 4;
u64 sync0st : 4;
} s;
struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn70xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_rx_sync_states_reg cvmx_pcsxx_rx_sync_states_reg_t;
/**
* cvmx_pcsx#_serdes_crdt_cnt_reg
*
* PCSX SERDES Credit Count
*
*/
union cvmx_pcsxx_serdes_crdt_cnt_reg {
u64 u64;
struct cvmx_pcsxx_serdes_crdt_cnt_reg_s {
u64 reserved_5_63 : 59;
u64 cnt : 5;
} s;
struct cvmx_pcsxx_serdes_crdt_cnt_reg_s cn70xx;
struct cvmx_pcsxx_serdes_crdt_cnt_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_serdes_crdt_cnt_reg cvmx_pcsxx_serdes_crdt_cnt_reg_t;
/**
* cvmx_pcsx#_spd_abil_reg
*
* PCSX_SPD_ABIL_REG = Speed ability register
*
*/
union cvmx_pcsxx_spd_abil_reg {
u64 u64;
struct cvmx_pcsxx_spd_abil_reg_s {
u64 reserved_2_63 : 62;
u64 tenpasst : 1;
u64 tengb : 1;
} s;
struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn70xx;
struct cvmx_pcsxx_spd_abil_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_spd_abil_reg cvmx_pcsxx_spd_abil_reg_t;
/**
* cvmx_pcsx#_status1_reg
*
* PCSX_STATUS1_REG = Status Register1
*
*/
union cvmx_pcsxx_status1_reg {
u64 u64;
struct cvmx_pcsxx_status1_reg_s {
u64 reserved_8_63 : 56;
u64 flt : 1;
u64 reserved_3_6 : 4;
u64 rcv_lnk : 1;
u64 lpable : 1;
u64 reserved_0_0 : 1;
} s;
struct cvmx_pcsxx_status1_reg_s cn52xx;
struct cvmx_pcsxx_status1_reg_s cn52xxp1;
struct cvmx_pcsxx_status1_reg_s cn56xx;
struct cvmx_pcsxx_status1_reg_s cn56xxp1;
struct cvmx_pcsxx_status1_reg_s cn61xx;
struct cvmx_pcsxx_status1_reg_s cn63xx;
struct cvmx_pcsxx_status1_reg_s cn63xxp1;
struct cvmx_pcsxx_status1_reg_s cn66xx;
struct cvmx_pcsxx_status1_reg_s cn68xx;
struct cvmx_pcsxx_status1_reg_s cn68xxp1;
struct cvmx_pcsxx_status1_reg_s cn70xx;
struct cvmx_pcsxx_status1_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_status1_reg cvmx_pcsxx_status1_reg_t;
/**
* cvmx_pcsx#_status2_reg
*
* PCSX_STATUS2_REG = Status Register2
*
*/
union cvmx_pcsxx_status2_reg {
u64 u64;
struct cvmx_pcsxx_status2_reg_s {
u64 reserved_16_63 : 48;
u64 dev : 2;
u64 reserved_12_13 : 2;
u64 xmtflt : 1;
u64 rcvflt : 1;
u64 reserved_3_9 : 7;
u64 tengb_w : 1;
u64 tengb_x : 1;
u64 tengb_r : 1;
} s;
struct cvmx_pcsxx_status2_reg_s cn52xx;
struct cvmx_pcsxx_status2_reg_s cn52xxp1;
struct cvmx_pcsxx_status2_reg_s cn56xx;
struct cvmx_pcsxx_status2_reg_s cn56xxp1;
struct cvmx_pcsxx_status2_reg_s cn61xx;
struct cvmx_pcsxx_status2_reg_s cn63xx;
struct cvmx_pcsxx_status2_reg_s cn63xxp1;
struct cvmx_pcsxx_status2_reg_s cn66xx;
struct cvmx_pcsxx_status2_reg_s cn68xx;
struct cvmx_pcsxx_status2_reg_s cn68xxp1;
struct cvmx_pcsxx_status2_reg_s cn70xx;
struct cvmx_pcsxx_status2_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_status2_reg cvmx_pcsxx_status2_reg_t;
/**
* cvmx_pcsx#_tx_rx_polarity_reg
*
* RX lane polarity vector [3:0] = XOR_RXPLRT<9:6> ^ [4[RXPLRT<1>]];
* TX lane polarity vector [3:0] = XOR_TXPLRT<5:2> ^ [4[TXPLRT<0>]];
* In short keep <1:0> to 2'b00, and use <5:2> and <9:6> fields to define per lane polarities
*/
union cvmx_pcsxx_tx_rx_polarity_reg {
u64 u64;
struct cvmx_pcsxx_tx_rx_polarity_reg_s {
u64 reserved_10_63 : 54;
u64 xor_rxplrt : 4;
u64 xor_txplrt : 4;
u64 rxplrt : 1;
u64 txplrt : 1;
} s;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
u64 reserved_2_63 : 62;
u64 rxplrt : 1;
u64 txplrt : 1;
} cn52xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn70xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_tx_rx_polarity_reg cvmx_pcsxx_tx_rx_polarity_reg_t;
/**
* cvmx_pcsx#_tx_rx_states_reg
*
* PCSX_TX_RX_STATES_REG = Transmit Receive States Register
*
*/
union cvmx_pcsxx_tx_rx_states_reg {
u64 u64;
struct cvmx_pcsxx_tx_rx_states_reg_s {
u64 reserved_14_63 : 50;
u64 term_err : 1;
u64 syn3bad : 1;
u64 syn2bad : 1;
u64 syn1bad : 1;
u64 syn0bad : 1;
u64 rxbad : 1;
u64 algn_st : 3;
u64 rx_st : 2;
u64 tx_st : 3;
} s;
struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
u64 reserved_13_63 : 51;
u64 syn3bad : 1;
u64 syn2bad : 1;
u64 syn1bad : 1;
u64 syn0bad : 1;
u64 rxbad : 1;
u64 algn_st : 3;
u64 rx_st : 2;
u64 tx_st : 3;
} cn52xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn70xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn70xxp1;
};
typedef union cvmx_pcsxx_tx_rx_states_reg cvmx_pcsxx_tx_rx_states_reg_t;
#endif
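A hedged sketch (not part of the patch) of how the status register above might be polled for link state, assuming node 0 and the csr_rd_node() accessor used elsewhere in this series:

static inline int example_pcsxx_link_up(int interface)
{
	cvmx_pcsxx_status1_reg_t status1;

	status1.u64 = csr_rd_node(0, CVMX_PCSXX_STATUS1_REG(interface));
	return status1.s.rcv_lnk;
}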

View file

@ -0,0 +1,343 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
/* L4_PORT_CHECK_DISABLE_LF tag */
/* This file is autogenerated from ipemainc.elf */
const int cvmx_pki_cluster_code_length = 997;
const u64 cvmx_pki_cluster_code_default[] = {
0x000000000a000000ull, 0x0000413a68024070ull, 0x0000813800200020ull,
0x900081b800200020ull, 0x0004da00ffff0001ull, 0x000455ab68010b0eull,
0x00045fba46010000ull, 0x9046898120002000ull, 0x0004418068010028ull,
0x90665300680100f0ull, 0x0004413f68004070ull, 0x00065380680100f0ull,
0x00045a346803a0f0ull, 0x000401b448000001ull, 0x00045cb968030870ull,
0x0007debd00100010ull, 0x0000813b80008000ull, 0x000441bb68004070ull,
0xd001c00000000000ull, 0xd021c00000000000ull, 0x00045f80680100f0ull,
0x0004c639ff000200ull, 0x0004403f72010000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x000041ba68034078ull, 0x0000512268030870ull,
0x000041bc68034070ull, 0x00005d3a68030870ull, 0x00045cb942080000ull,
0x0004552a4e09312dull, 0x00045cb968082868ull, 0x0004410246090000ull,
0x0000813800800080ull, 0x000401a486000005ull, 0x000615ab74000123ull,
0x0007122448000004ull, 0x0000813901000000ull, 0x000481b800010001ull,
0x000685b800020002ull, 0xa006823800010001ull, 0x0006c639ff000400ull,
0x00085f3e68010a00ull, 0xa0885f3e68010f01ull, 0x00085f3e68010405ull,
0x00085f3e68010906ull, 0xa0485f3e68010e07ull, 0xa061c00000000000ull,
0xa4085f3e68010b28ull, 0xa421c00000000000ull, 0x00095f3e68010940ull,
0xa066403e72010000ull, 0x000941be68034039ull, 0x00085f3e68010305ull,
0xa4685f3e68010028ull, 0x00095f3e68030030ull, 0x00095f3e68010416ull,
0x0001c00000000000ull, 0x00065cb942080000ull, 0xa046552a4e09312dull,
0xa446c639ff000500ull, 0x0006debd00010001ull, 0x0006403e72010001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x00065cb942080000ull, 0x0006552a4e09312dull,
0x00065cb968082868ull, 0x0006410246090000ull, 0x9060813901000000ull,
0x0004c639ff000800ull, 0x0004400072010000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x00045cb942080000ull,
0x9084552a4e09312dull, 0x90a4c639ff000900ull, 0x00045f80680100f0ull,
0x0004403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x00045cb942080000ull, 0x9004552a4e09312dull,
0x0004c639ff000a00ull, 0x0004400072010000ull, 0x00048181ff00ff00ull,
0x0007820101000100ull, 0x0006898100ff00ffull, 0x00048301ffff0180ull,
0x0008d5ab10001000ull, 0x0004d4a900010001ull, 0x0001c00000000000ull,
0x00045cb942080000ull, 0x9024552a4e09312dull, 0x0004c639ff000b00ull,
0x90445f80680100f0ull, 0x000459b368020070ull, 0x000401024000000cull,
0x0006823fffffffffull, 0x00088281ffffffffull, 0x000ad5ab20002000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0004403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x000c8b3fffffc200ull, 0x000c8b01ffff0001ull,
0x000ddebd00020002ull, 0x00045cb942080000ull, 0x0004552a4e09312dull,
0x00045cb968082868ull, 0x0004410246090000ull, 0x0000813901000000ull,
0x000481b800080008ull, 0x9846c639ff001200ull, 0x9861c00000000000ull,
0x00064180680100f0ull, 0x0006400372010000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x000683891f000200ull,
0x000ed52a00800080ull, 0x000e5e3c68020070ull, 0x00065cb942080000ull,
0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
0x0000813d00020002ull, 0x0004893901000000ull, 0x9004893800040004ull,
0x9024c639ff001300ull, 0x00044180680100f0ull, 0x9044400372010001ull,
0x0001c00000000000ull, 0x00045f3e68010044ull, 0x0004debd00040004ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x000483891f000200ull, 0x000ed52a00800080ull, 0x000e5e3c68020070ull,
0x00045cb942080000ull, 0x0004552a4e09312dull, 0x00045cb968082868ull,
0x0004410246090000ull, 0x000581b902000000ull, 0x9826c639ff001800ull,
0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001900ull,
0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001a00ull,
0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001b00ull,
0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
0x0006410246090000ull, 0x9000813902000000ull, 0x000481b800400040ull,
0x00068981ffff8847ull, 0x00068581ffff8848ull, 0x0006debd00080008ull,
0x0006c639ff001e00ull, 0x0006010240000002ull, 0x9801c00000000000ull,
0x9821c00000000000ull, 0x00065f80680100f0ull, 0x0006403f72010000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
0x0006010240000004ull, 0x0006823902000000ull, 0x00065f3e68010629ull,
0xac28828101000100ull, 0x000b010240000004ull, 0xa42b820101000100ull,
0x0009010240000004ull, 0xac29828101000100ull, 0x000b010240000004ull,
0xa42b820101000100ull, 0x0009010240000004ull, 0xac29828101000100ull,
0x000b010240000004ull, 0x0006823904000000ull, 0x0008d4a907c00200ull,
0x0008593268020070ull, 0x0008dcb902000200ull, 0x9000813902000000ull,
0x0001c00000000000ull, 0x00040181840005ffull, 0x0006010240000008ull,
0x9801c00000000000ull, 0x0006debd00200020ull, 0x00048181ffff0806ull,
0x0006d4a907c00180ull, 0x00048201ffff8035ull, 0x00068581ffff8035ull,
0x0008d4a907c001c0ull, 0x0006dcb97c007c00ull, 0x00048201ffff0800ull,
0x00088601ffff86ddull, 0x00068581ffff0800ull, 0x00068581ffff86ddull,
0x0008d4a907c00200ull, 0x0009dcb97c007c00ull, 0x0007823d00200020ull,
0x000685bd00200020ull, 0x0008d4a907c00140ull, 0x0004010240000002ull,
0x0006593268020070ull, 0x000042a486020000ull, 0x000a15ab74000124ull,
0x9000813904000000ull, 0x0001c00000000000ull, 0x00048181f0004000ull,
0x9886593268020070ull, 0x0006d4a907c00200ull, 0x00068201ff000000ull,
0xa40815ab74000345ull, 0x0009debd01000100ull, 0xa429418068010038ull,
0x00095a3468010870ull, 0x0009028386000005ull, 0x000a068186000014ull,
0xacca15ab74000343ull, 0xacebc639ff002200ull, 0x000b5f80680100f0ull,
0xac8b403f72010000ull, 0x000b8203000f0005ull, 0x000b5a3468010070ull,
0x0009d4a907c00240ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x000b5cb942080000ull, 0xad0b552a4e09312dull,
0xad2bc639ff002700ull, 0x000b5f80680100f0ull, 0xac6b403f72010001ull,
0x0001c00000000000ull, 0x000b82013fff0000ull, 0x0009d52a00010001ull,
0x0009d4a9f8006800ull, 0x0009593268020870ull, 0x0006418068030230ull,
0x000b5cb942080000ull, 0x000b552a4e09312dull, 0x0006410240030000ull,
0x9c01c00000000000ull, 0x0001c00000000000ull, 0x00078201f0006000ull,
0x0008593268020070ull, 0x0008d4a907c00280ull, 0xa069d4a907c00000ull,
0x00085a3468010874ull, 0x0008818100ff0000ull, 0x000615ab74000345ull,
0x00075a3468010078ull, 0x9c8741b9680040f0ull, 0x9ca7c603ff001f00ull,
0x00075f80680100f0ull, 0x0007403f72010001ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0007418342080000ull,
0x9cc7552a4e09312dull, 0x9ce7c603ff002000ull, 0x00075f80680100f0ull,
0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0007418342080000ull, 0x9d07552a4e09312dull,
0x9d27c603ff002100ull, 0x00075f80680100f0ull, 0x0007403f72010001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0007418342080000ull, 0x0007552a4e09312dull, 0x9d475c80680300f0ull,
0x9d67c639ff002200ull, 0x00075f80680100f0ull, 0x0007403f72010000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x00075cb942080000ull, 0x0007552a4e09312dull, 0x9d8741b9680040f0ull,
0x9da7c603ff002400ull, 0x00075f80680100f0ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0007418342080000ull, 0x9dc7552a4e09312dull,
0x9de7c603ff002500ull, 0x00075f80680100f0ull, 0x0007403f72010001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0007418342080000ull, 0x0007552a4e09312dull, 0x0007010240000020ull,
0x9c01c00000000000ull, 0x9c27c603ff002600ull, 0x00075f80680100f0ull,
0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0007418342080000ull, 0x0007552a4e09312dull,
0x9c475c80680300f0ull, 0x9c67c639ff002700ull, 0x00075f80680100f0ull,
0x0007403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x00075cb942080000ull, 0x0007552a4e09312dull,
0x0007010240000008ull, 0xa80782b400ff0000ull, 0x000ad4a907c002c0ull,
0x000a5a3468010078ull, 0x000a410244010000ull, 0xa80782b400ff003cull,
0x000ad4a907c002c0ull, 0x000a5a3468010078ull, 0x000a410244010000ull,
0xa80782b400ff002bull, 0x000ad4a907c002c0ull, 0x000a5a3468010078ull,
0x000a410244010000ull, 0xa80782b400ff002cull, 0x000ad4a9ffc06ac0ull,
0x000a593268020870ull, 0x000ad52a00010001ull, 0x000a5a3468010078ull,
0x000a010240000008ull, 0x0007debd01000100ull, 0x000481bd01000100ull,
0x0006c639ff002300ull, 0x000641aa68034000ull, 0x000641a968034846ull,
0x0006403472030001ull, 0x0004822907000200ull, 0x000915ab74000341ull,
0x000082aa00010001ull, 0x000a86ab00ff0045ull, 0x000adcb978007800ull,
0x0000822907000200ull, 0x00088a3908000000ull, 0x00065cb942080000ull,
0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
0x000042a486020000ull, 0x000a15ab74000343ull, 0x000081b940004000ull,
0x000685a907c00000ull, 0x000782b807000100ull, 0x000a41b268004070ull,
0x000a410040030000ull, 0x000a41ba68004078ull, 0x000a410240030000ull,
0xa801c00000000000ull, 0xa821c00000000000ull, 0x000a4180680100f0ull,
0x000ac639ff003900ull, 0x000a400372010001ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x000a83891f000000ull,
0x000f542868090a48ull, 0x000f583068020070ull, 0x000a5cb942080000ull,
0x000a552a4e09312dull, 0x000a5cb968082868ull, 0x000a410246090000ull,
0x982881b400ff0011ull, 0x9881c00000000000ull, 0x00064180680100f0ull,
0x00068283ffff12b5ull, 0x000a8a8108000800ull, 0x000ad4a9f8009800ull,
0x00068303ffff17c1ull, 0x000c8b01c0000000ull, 0xb0ac5bb768010a58ull,
0x000cd4a9f800b800ull, 0x000c8281ffff6558ull, 0x000adbb701000100ull,
0x000c8281ffff86ddull, 0x000a8681ffff0800ull, 0x000adbb702000200ull,
0x000682a9c8009800ull, 0x000adebd02000200ull, 0x000a593268020870ull,
0x000a010240000008ull, 0x9c21c00000000000ull, 0x0007813400ff002full,
0x90048201ffff6558ull, 0x00098381ffff0800ull, 0x00088281b0002000ull,
0x000a593268020870ull, 0x000ad4a9f800a800ull, 0x000adebd02000200ull,
0x000e593268020870ull, 0x000ed4a9f800a000ull, 0x000e010240000004ull,
0x000e828180008000ull, 0x000a010240000004ull, 0x000e828120002000ull,
0x000a010240000004ull, 0x000e828110001000ull, 0x000a010240000004ull,
0x000082bd02000200ull, 0xa80ac639ff002800ull, 0xa861c00000000000ull,
0x000a418368010526ull, 0xa84a418368010878ull, 0x000a5bb768030078ull,
0x000a400172030000ull, 0x000a5b00680100f0ull, 0x000041b468034878ull,
0x00005fbf68030878ull, 0x00068229c8009800ull, 0x0008010248000008ull,
0xa001c00000000000ull, 0x000843a486020000ull, 0x00088101ffff0000ull,
0x000415ab74000464ull, 0x000e15ab74000461ull, 0x0008010240000008ull,
0x000c41b76800425aull, 0x000c410240030000ull, 0x000a010240000008ull,
0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
0x000a410246090000ull, 0x0000422486020000ull, 0x000815ab74000461ull,
0x000081b940004000ull, 0x000685a9f8000000ull, 0x000782b807000200ull,
0x000a41b268004078ull, 0x000a410040030000ull, 0x000a41ba68004078ull,
0x000a410240030000ull, 0xa801c00000000000ull, 0xa821c00000000000ull,
0x000a4180680100f0ull, 0x000ac639ff003900ull, 0x000a400372010001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x000a83891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
0x000a410246090000ull, 0x000081a9f800b800ull, 0x000689b701000100ull,
0x000685a9f8009800ull, 0x000685a9f800a800ull, 0x00078229f800b800ull,
0x000601024000000cull, 0x9801c00000000000ull, 0x00088a3702000200ull,
0x00088629f800a000ull, 0x00068101ffff8100ull, 0x0004010240000004ull,
0x9801c00000000000ull, 0x0009dcb910001000ull, 0x00068101ffff86ddull,
0x00048501ffff0800ull, 0x0005dcb978003800ull, 0x0006010240000002ull,
0x000081a9f8000000ull, 0x9007813910000000ull, 0x0001c00000000000ull,
0x00048181f0004000ull, 0x988658b168020070ull, 0x0006d428001f0008ull,
0x00068201ff000000ull, 0xa40815ab74000545ull, 0x0009debd04000400ull,
0xa429418068010038ull, 0x00095a3468010870ull, 0x0009028386000005ull,
0xac8a068186000014ull, 0x000a15ab74000543ull, 0x000b5a3468010070ull,
0xac6b8303000f0005ull, 0x000dd428001f0009ull, 0x000b83013fff0000ull,
0x000dd42803e001a0ull, 0x000d58b168020870ull, 0x000ddcb960006000ull,
0x0006418068030230ull, 0x0006410240030000ull, 0x9c01c00000000000ull,
0x0001c00000000000ull, 0x00078201f0006000ull, 0x000858b168020070ull,
0xa068d428001f000aull, 0x00085a3468010874ull, 0x0008818100ff0000ull,
0x000615ab74000545ull, 0x00075a3468010078ull, 0x0007010240000028ull,
0xa80782b400ff0000ull, 0x000ad428001f000bull, 0x000a5a3468010078ull,
0x000a410244010000ull, 0xa80782b400ff003cull, 0x000ad428001f000bull,
0x000a5a3468010078ull, 0x000a410244010000ull, 0xa80782b400ff002bull,
0x000ad428001f000bull, 0x000a5a3468010078ull, 0x000a410244010000ull,
0xa80782b400ff002cull, 0x000ad42803ff01abull, 0x000adcb960006000ull,
0x000a58b168020870ull, 0x000a5a3468010078ull, 0x000a010240000008ull,
0x0007debd04000400ull, 0x000481bd04000400ull, 0x0006c639ff002b00ull,
0x0006832803e001a0ull, 0x000cc18300010001ull, 0x000dc18300010000ull,
0x000641a868034840ull, 0x0006403472030001ull, 0x00048228001c0008ull,
0x000915ab74000541ull, 0x000082ab00ff0045ull, 0x000adcb960006000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x00065cb942080000ull,
0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
0x000042a486020000ull, 0x000a15ab74000543ull, 0x000081b940004000ull,
0x000685a8001f0000ull, 0x000782b807000300ull, 0x000a41b168004070ull,
0x000a410040030000ull, 0x000a41ba68004078ull, 0x000a410240030000ull,
0xa801c00000000000ull, 0xa821c00000000000ull, 0x000a4180680100f0ull,
0x000ac639ff003900ull, 0x000a400372010001ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x000a83891f000000ull,
0x000f542868090a48ull, 0x000f583068020070ull, 0x000a5cb942080000ull,
0x000a552a4e09312dull, 0x000a5cb968082868ull, 0x000a410246090000ull,
0x00008329ff000200ull, 0x000c8728001c0008ull, 0x000c813920000000ull,
0x000481b400ff006cull, 0x0006d42803e001c0ull, 0x000658b168020870ull,
0xa047823400ff0033ull, 0x0008d42803e00180ull, 0xa0685f80680100f0ull,
0xa007823400ff0032ull, 0x0008d42803e00180ull, 0xa0285f80680100f0ull,
0x0007822803e00180ull, 0x0008c639ff002e00ull, 0x0008403f72010000ull,
0x000858b168020870ull, 0x00085abf680040f0ull, 0x00085d80680100f0ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x00085cb942080000ull, 0x0008552a4e09312dull, 0x00085cb968082868ull,
0x0008410246090000ull, 0x986981b400ff002full, 0x0006d42803e00280ull,
0x00065a80680100f0ull, 0x000658b168020870ull, 0x000481b400ff0084ull,
0x0006d42803e00240ull, 0x0004823400ff0011ull, 0x0008d42803e00220ull,
0x98c481b400ff0006ull, 0x0006d42803e00200ull, 0x00065ebd68010b31ull,
0x000641806801003cull, 0x0006028386000005ull, 0x000a15ab74000661ull,
0x0006418068030230ull, 0x0008c180ffff0008ull, 0x0008863400ff0006ull,
0x0008418240030000ull, 0x000842a486030000ull, 0x000a15ab74000661ull,
0x9008863400ff0084ull, 0x0004c639ff002f00ull, 0x0004400072010001ull,
0x000858b168020870ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x00085cb942080000ull, 0x9028552a4e09312dull,
0x0004c639ff003000ull, 0x0004403472010000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x000858b168020870ull, 0x0001c00000000000ull,
0x000081b940004000ull, 0x000685a803e00000ull, 0x00045cb942080000ull,
0x0004552a4e09312dull, 0x00045cb968082868ull, 0x0004410246090000ull,
0x000483891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
0x000042a486020000ull, 0x000a15ab74000661ull, 0x000782b807000400ull,
0x000a41b168004078ull, 0x000a410040030000ull, 0x000a41ba68004078ull,
0x000a410240030000ull, 0xa801c00000000000ull, 0xa821c00000000000ull,
0x000a4180680100f0ull, 0x000ac639ff003900ull, 0x000a400372010001ull,
0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull, 0x000041bf68034878ull, 0x00005a3468030878ull,
0x000a83891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
0x000a410246090000ull, 0x00005fb968004250ull, 0x0000003f70000000ull,
0x000041b968034070ull, 0x0000512268030070ull, 0x0000813800200020ull,
0x0004413a68024070ull, 0x9001c00000000000ull, 0x000081b800200020ull,
0x9026898180008000ull, 0x0004890110001000ull, 0x000456ad680100a0ull,
0x0006898180008000ull, 0x000652a56801001dull, 0x000456ad68090b5bull,
0x00055680680900f0ull, 0x0005debd00400040ull, 0x00005600680800f0ull,
0x0000833d00200020ull, 0x000c872907c00000ull, 0x000dd62c20000000ull,
0x0000822902800280ull, 0x000841b268034070ull, 0x000982a8000a000aull,
0x000a41b168034070ull, 0x000b822907c00000ull, 0x0000003f70000800ull,
0x000941b268034070ull, 0x0000418048030000ull, 0x0000018340000008ull,
0x0009018348000004ull, 0x000050a168030c20ull, 0x000082aa00800080ull,
0x000850a168080c2bull, 0x0000820800010001ull, 0x000850a168000c20ull,
0x000752a56808001eull, 0x000a822a00400040ull, 0x00088a0900010001ull,
0x000841bc68034078ull, 0x000941bc68034070ull, 0x000a583068030870ull,
0x0000813d00400000ull, 0x0005c180ffff0000ull, 0x00058288001e0000ull,
0x000b8208001e0008ull, 0x00085d2168004030ull, 0x00098308001e0010ull,
0x00088608001e0010ull, 0x000c5d2168004070ull, 0x0008418068080025ull,
0x000841ba6803a0f0ull, 0x000856ad40030000ull, 0x0008c180ffff0000ull,
0x0005820807000500ull, 0x00088a3d00010001ull, 0x000841be68004050ull,
0x0005828807000300ull, 0x000a8abd00040004ull, 0x000a41be68004040ull,
0x0005820807000100ull, 0x00088a2a00800080ull, 0x0008413068004078ull,
0xa021c00000000000ull, 0x0005828807000200ull, 0x000841806801002dull,
0x000a8abd00080008ull, 0x000a41be68004026ull, 0x0005820807000400ull,
0x00088a2907000200ull, 0x000841b46800405aull, 0x000556ad40030000ull,
0x000081bd00100010ull, 0x0006c180ffff0000ull, 0x0006822a00800080ull,
0x00088a0900100010ull, 0x0008413c68024070ull, 0xa021c00000000000ull,
0x0006832907000200ull, 0x0008c181f0008000ull, 0x000841834c00ffffull,
0x0006822a00400040ull, 0x00088a0900200020ull, 0x0008413c68024078ull,
0xa021c00000000000ull, 0x000c8b0900400040ull, 0x0008dc01f0008000ull,
0x000841b84c03ffffull, 0x000c8b2a00010000ull, 0x000c41b44c0300ffull,
0x000682a9f800a800ull, 0x000a86a9f8009800ull, 0x000a8a8904000400ull,
0x000a41b64c03ffffull, 0x000a41b74c0300ffull, 0x0000828901000100ull,
0x000a822803e00180ull, 0x0008413168024078ull, 0x0008833400ff0033ull,
0x000c010240000004ull, 0xa001c00000000000ull, 0xa021c00000000000ull,
0x000841814c03ffffull, 0x000841814c03ffffull, 0x000a822803e00280ull,
0x000841b54c03ffffull, 0x000682287c005800ull, 0x00088a0902000200ull,
0x0008413068024070ull, 0xa001c00000000000ull, 0x0006830900020002ull,
0x00088281e0002000ull, 0xa84a868108000800ull, 0xa861c00000000000ull,
0x000a41814c03ffffull, 0x000a41814c03ffffull, 0x00065380680300f0ull,
0x000c5321680040b0ull, 0x000dd3260fff0fffull, 0x0006810900800080ull,
0x0000003f70000400ull, 0x000082a907000200ull, 0x000a413268024070ull,
0xa50a822902800280ull, 0x0004893d08000800ull, 0x00098301ffffffffull,
0xa4c98381f000e000ull, 0x00095f00680100f0ull, 0xa5295f3e64010000ull,
0x0001c00000000000ull, 0xa4ec8b01ffffffffull, 0x00095d00680100f0ull,
0xa1895d3a64010000ull, 0x000cd5ab80008000ull, 0x00088a01ff00ff00ull,
0x0008d5ab40004000ull, 0x000ed5ab40004000ull, 0x0004893d40000000ull,
0x00005700680800f0ull, 0x00005780680900f0ull, 0x00008229f800a000ull,
0x0008c180ffff0018ull, 0x000857af680320f0ull, 0x0007d72ef1ff0000ull,
0x0007d7aff0000000ull, 0x0004d72e00fc0000ull, 0x0000812c00020002ull,
0x0004892907c00200ull, 0x000441a7680040f0ull, 0x000441be4c03ffffull,
0x000441ba4c03ffffull, 0x000481a803c00200ull, 0x0006413168024078ull,
0x9801c00000000000ull, 0x9821c00000000000ull, 0x00065f80680100f0ull,
0x00065fbf64010000ull, 0x000641bf4c03ffffull, 0x000452a568030250ull,
0x0000000008000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
0x0001c00000000000ull
};

View file

@ -0,0 +1,213 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Backward compatibility for packet transmission using legacy PKO command.
*/
#ifndef __CVMX_PKO_H__
#define __CVMX_PKO_H__
extern cvmx_pko_return_value_t
cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
cvmx_buf_ptr_t packet, uint64_t addr, bool tag_sw);
/**
* Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
* once before this, and the same parameters must be passed to both
* cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish().
*
* WARNING: This function may have to look up the proper PKO port in
* the IPD port to PKO port map, and is thus slower than calling
* cvmx_pko_send_packet_finish_pkoid() directly if the PKO port
* identifier is known.
*
* @param ipd_port The IPD port corresponding to the PKO port the packet is for
* @param queue Queue to use
* @param pko_command
* PKO HW command word
* @param packet to send
* @param use_locking
* CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
* or CVMX_PKO_LOCK_CMD_QUEUE
*
* @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
*/
static inline cvmx_pko_return_value_t
cvmx_pko_send_packet_finish(u64 ipd_port, uint64_t queue,
cvmx_pko_command_word0_t pko_command,
cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
{
cvmx_cmd_queue_result_t result;
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
return cvmx_pko3_legacy_xmit(queue, pko_command, packet, 0,
use_locking ==
CVMX_PKO_LOCK_ATOMIC_TAG);
}
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
cvmx_pow_tag_sw_wait();
result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
(use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
pko_command.u64, packet.u64);
if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
cvmx_pko_doorbell(ipd_port, queue, 2);
return CVMX_PKO_SUCCESS;
} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
(result == CVMX_CMD_QUEUE_FULL)) {
return CVMX_PKO_NO_MEMORY;
} else {
return CVMX_PKO_INVALID_QUEUE;
}
}
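For orientation, here is a minimal transmit sketch showing how the prepare/finish pair above is intended to be used. It is not part of this patch: the queue lookup via cvmx_pko_get_base_queue(), the use of CVMX_PKO_LOCK_NONE and the cvmx_pko_command_word0_t/cvmx_buf_ptr_t field names follow the usual Octeon SDK conventions and should be read as assumptions here.
/* Illustrative only: transmit one contiguous buffer on the base queue
 * of an IPD port. Error handling is reduced to a printf.
 */
static void example_pko_send(u64 ipd_port, void *data, int len)
{
	u64 queue = cvmx_pko_get_base_queue(ipd_port);	/* assumed SDK helper */
	cvmx_pko_command_word0_t cmd;
	cvmx_buf_ptr_t buf;
	cmd.u64 = 0;
	cmd.s.segs = 1;			/* one buffer segment */
	cmd.s.total_bytes = len;	/* total packet length */
	buf.u64 = 0;
	buf.s.addr = cvmx_ptr_to_phys(data);	/* physical address of the data */
	buf.s.size = len;
	/* Same port/queue/locking arguments for prepare and finish */
	cvmx_pko_send_packet_prepare(ipd_port, queue, CVMX_PKO_LOCK_NONE);
	if (cvmx_pko_send_packet_finish(ipd_port, queue, cmd, buf,
					CVMX_PKO_LOCK_NONE) != CVMX_PKO_SUCCESS)
		printf("PKO transmit failed\n");
}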
/**
* Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
* once before this, and the same parameters must be passed to both
* cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish().
*
* WARNING: This function may have to look up the proper PKO port in
* the IPD port to PKO port map, and is thus slower than calling
* cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port
* identifier is known.
*
* @param ipd_port The IPD port corresponding to the PKO port the packet is for
* @param queue Queue to use
* @param pko_command
* PKO HW command word
* @param packet to send
* @param addr Physical address of a work queue entry or physical address to zero
* on complete.
* @param use_locking
* CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
* or CVMX_PKO_LOCK_CMD_QUEUE
*
* @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
*/
static inline cvmx_pko_return_value_t
cvmx_pko_send_packet_finish3(u64 ipd_port, uint64_t queue,
cvmx_pko_command_word0_t pko_command,
cvmx_buf_ptr_t packet, uint64_t addr,
cvmx_pko_lock_t use_locking)
{
cvmx_cmd_queue_result_t result;
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
return cvmx_pko3_legacy_xmit(queue, pko_command, packet, addr,
use_locking ==
CVMX_PKO_LOCK_ATOMIC_TAG);
}
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
cvmx_pow_tag_sw_wait();
result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
(use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
pko_command.u64, packet.u64, addr);
if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
cvmx_pko_doorbell(ipd_port, queue, 3);
return CVMX_PKO_SUCCESS;
} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
(result == CVMX_CMD_QUEUE_FULL)) {
return CVMX_PKO_NO_MEMORY;
} else {
return CVMX_PKO_INVALID_QUEUE;
}
}
/**
* Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
* once before this, and the same parameters must be passed to both
* cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish_pkoid().
*
* @param pko_port Port to send it on
* @param queue Queue to use
* @param pko_command
* PKO HW command word
* @param packet to send
* @param use_locking
* CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
* or CVMX_PKO_LOCK_CMD_QUEUE
*
* @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
*/
static inline cvmx_pko_return_value_t
cvmx_pko_send_packet_finish_pkoid(int pko_port, uint64_t queue,
cvmx_pko_command_word0_t pko_command,
cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
{
cvmx_cmd_queue_result_t result;
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
return cvmx_pko3_legacy_xmit(queue, pko_command, packet, 0,
use_locking ==
CVMX_PKO_LOCK_ATOMIC_TAG);
}
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
cvmx_pow_tag_sw_wait();
result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
(use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
pko_command.u64, packet.u64);
if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
cvmx_pko_doorbell_pkoid(pko_port, queue, 2);
return CVMX_PKO_SUCCESS;
} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
(result == CVMX_CMD_QUEUE_FULL)) {
return CVMX_PKO_NO_MEMORY;
} else {
return CVMX_PKO_INVALID_QUEUE;
}
}
/**
* Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
* once before this, and the same parameters must be passed to both
* cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish_pkoid().
*
* @param pko_port The PKO port the packet is for
* @param queue Queue to use
* @param pko_command
* PKO HW command word
* @param packet to send
* @param addr Physical address of a work queue entry or physical address to zero
* on complete.
* @param use_locking
* CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
* or CVMX_PKO_LOCK_CMD_QUEUE
*
* @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
*/
static inline cvmx_pko_return_value_t
cvmx_pko_send_packet_finish3_pkoid(u64 pko_port, uint64_t queue,
cvmx_pko_command_word0_t pko_command,
cvmx_buf_ptr_t packet, uint64_t addr,
cvmx_pko_lock_t use_locking)
{
cvmx_cmd_queue_result_t result;
if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
return cvmx_pko3_legacy_xmit(queue, pko_command, packet, addr,
use_locking ==
CVMX_PKO_LOCK_ATOMIC_TAG);
}
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
cvmx_pow_tag_sw_wait();
result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
(use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
pko_command.u64, packet.u64, addr);
if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
cvmx_pko_doorbell_pkoid(pko_port, queue, 3);
return CVMX_PKO_SUCCESS;
} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
(result == CVMX_CMD_QUEUE_FULL)) {
return CVMX_PKO_NO_MEMORY;
} else {
return CVMX_PKO_INVALID_QUEUE;
}
}
#endif /* __CVMX_PKO_H__ */

View file

@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#ifndef __CVMX_PKO3_RESOURCES_H__
#define __CVMX_PKO3_RESOURCES_H__
/*
* Allocate or reserve contiguous list of PKO queues.
*
* @param node is the node number for PKO queues.
* @param level is the PKO queue level.
* @param owner is the owner of PKO queue resources.
* @param base_queue is the PKO queue base number (specify -1 to allocate).
* @param num_queues is the number of PKO queues that have to be reserved or allocated.
* @return returns queue_base if successful or -1 on failure.
*/
int cvmx_pko_alloc_queues(int node, int level, int owner, int base_queue,
int num_queues);
/**
* Free an allocated/reserved PKO queues for a certain level and owner
*
* @param node on which to allocate/reserve PKO queues
* @param level of PKO queue
* @param owner of reserved/allocated resources
* @return 0 on success, -1 on failure
*/
int cvmx_pko_free_queues(int node, int level, int owner);
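As an aside, a hypothetical caller of this interface (not part of this patch) would typically let the allocator pick the base queue and later release everything it owns; the level and owner values below are placeholders.
/* Illustrative only: reserve eight contiguous queues on 'node', use them,
 * then release everything tagged with 'owner'.
 */
static int example_reserve_queues(int node, int level, int owner)
{
	int base = cvmx_pko_alloc_queues(node, level, owner, -1, 8);
	if (base < 0)
		return -1;		/* allocation failed */
	/* ... configure queues base .. base + 7 here ... */
	return cvmx_pko_free_queues(node, level, owner);
}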
int __cvmx_pko3_dq_param_setup(unsigned node);
int cvmx_pko3_num_level_queues(enum cvmx_pko3_level_e level);
#endif /* __CVMX_PKO3_RESOURCES_H__ */

File diff suppressed because it is too large

View file

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#ifndef __CVMX_RANGE_H__
#define __CVMX_RANGE_H__
int cvmx_range_init(u64 range_addr, int size);
int cvmx_range_alloc(u64 range_addr, uint64_t owner, uint64_t cnt, int align);
int cvmx_range_alloc_ordered(u64 range_addr, uint64_t owner, u64 cnt, int align,
int reverse);
int cvmx_range_alloc_non_contiguos(u64 range_addr, uint64_t owner, u64 cnt,
int elements[]);
int cvmx_range_reserve(u64 range_addr, uint64_t owner, u64 base, uint64_t cnt);
int cvmx_range_free_with_base(u64 range_addr, int base, int cnt);
int cvmx_range_free_with_owner(u64 range_addr, uint64_t owner);
u64 cvmx_range_get_owner(u64 range_addr, uint64_t base);
void cvmx_range_show(uint64_t range_addr);
int cvmx_range_memory_size(int nelements);
int cvmx_range_free_mutiple(u64 range_addr, int bases[], int count);
#endif // __CVMX_RANGE_H__

View file

@ -6,6 +6,7 @@
#ifndef __CVMX_REGS_H__
#define __CVMX_REGS_H__
#include <log.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
@ -32,6 +33,7 @@
/* Regs */
#define CVMX_CIU3_NMI 0x0001010000000160ULL
#define CVMX_CIU3_ISCX_W1C(x) (0x0001010090000000ull + ((x) & 1048575) * 8)
#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
@ -55,11 +57,19 @@
#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
/* IOBDMA/LMTDMA IO addresses */
#define CVMX_LMTDMA_ORDERED_IO_ADDR 0xffffffffffffa400ull
#define CVMX_IOBDMA_ORDERED_IO_ADDR 0xffffffffffffa200ull
/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
#define VASTR(...) #__VA_ARGS__
#define CVMX_PKO_LMTLINE 2ull
#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
#define COP0_CVMMEMCTL $11,7 /* Cavium memory control */
#define CVMX_RDHWR(result, regstr) \
asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
@ -67,6 +77,13 @@
asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))
#define CVMX_MF_COP0(val, cop0) \
asm("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val))
#define CVMX_MT_COP0(val, cop0) \
asm("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val))
#define CVMX_MF_CVM_MEM_CTL(val) CVMX_MF_COP0(val, COP0_CVMMEMCTL)
#define CVMX_MT_CVM_MEM_CTL(val) CVMX_MT_COP0(val, COP0_CVMMEMCTL)
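The COP0 wrappers above are meant for read-modify-write sequences on the CVMMEMCTL register; the following sketch (not from the patch) shows the intended usage, with the actual bit manipulation left abstract since the bit layout is not defined here.
u64 memctl;
CVMX_MF_CVM_MEM_CTL(memctl);	/* dmfc0: read COP0 $11,7 into memctl */
/* ... adjust CVMMEMCTL bits in 'memctl' as required by the platform ... */
CVMX_MT_CVM_MEM_CTL(memctl);	/* dmtc0: write the value back */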
#define CVMX_SYNC asm volatile("sync\n" : : : "memory")
#define CVMX_SYNCW asm volatile("syncw\nsyncw\n" : : : "memory")
@ -81,6 +98,18 @@
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
/** a normal prefetch */
#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
/** normal prefetches that use the pref instruction */
#define CVMX_PREFETCH_PREFX(X, address, offset) \
asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
#define CVMX_PREFETCH_PREF0(address, offset) \
CVMX_PREFETCH_PREFX(0, address, offset)
/*
* The macros cvmx_likely and cvmx_unlikely use the
* __builtin_expect GCC operation to control branch
@ -405,6 +434,30 @@ static inline unsigned int cvmx_get_local_core_num(void)
return core_num & core_mask;
}
/**
* Given a CSR address return the node number of that address
*
* @param addr Address to extract node number from
*
* @return node number
*/
static inline u8 cvmx_csr_addr_to_node(u64 addr)
{
return (addr >> CVMX_NODE_IO_SHIFT) & CVMX_NODE_MASK;
}
/**
* Strip the node address bits from a CSR address
*
* @param addr CSR address to strip the node bits from
*
* @return CSR address with the node bits set to zero
*/
static inline u64 cvmx_csr_addr_strip_node(u64 addr)
{
return addr & ~((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT);
}
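A short illustration (not part of the patch) of how the two helpers compose, using the CVMX_CIU3_NMI register defined above and an assumed node number of 1:
u64 addr = CVMX_CIU3_NMI | ((u64)1 << CVMX_NODE_IO_SHIFT);	/* CSR address on node 1 */
u8 node = cvmx_csr_addr_to_node(addr);				/* yields 1 */
u64 local = cvmx_csr_addr_strip_node(addr);			/* node bits cleared again */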
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for POP instruction.
@ -428,14 +481,45 @@ static inline u32 cvmx_pop(u32 val)
#define cvmx_printf printf
#define cvmx_vprintf vprintf
#if defined(DEBUG)
void cvmx_warn(const char *format, ...) __printf(1, 2);
#else
void cvmx_warn(const char *format, ...);
#endif
/* Use common debug macros */
#define cvmx_warn debug
#define cvmx_warn_if debug_cond
#define cvmx_warn_if(expression, format, ...) \
if (expression) \
cvmx_warn(format, ##__VA_ARGS__)
/**
* Atomically adds a signed value to a 32 bit (aligned) memory location,
* and returns previous value.
*
* Memory access ordering is enforced before/after the atomic operation,
* so no additional 'sync' instructions are required.
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*
* @return Value of memory location before increment
*/
static inline int32_t cvmx_atomic_fetch_and_add32(int32_t * ptr, int32_t incr)
{
int32_t val;
val = *ptr;
*ptr += incr;
return val;
}
/**
* Atomically adds a signed value to a 32 bit (aligned) memory location.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints. (This should NOT be used for reference counting -
* use the standard version instead.)
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*/
static inline void cvmx_atomic_add32_nosync(int32_t * ptr, int32_t incr)
{
*ptr += incr;
}
#endif /* __CVMX_REGS_H__ */

View file

@ -0,0 +1,226 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*
* Configuration and status register (CSR) type definitions for
* Octeon xcv.
*/
#ifndef __CVMX_XCV_DEFS_H__
#define __CVMX_XCV_DEFS_H__
#define CVMX_XCV_BATCH_CRD_RET (0x00011800DB000100ull)
#define CVMX_XCV_COMP_CTL (0x00011800DB000020ull)
#define CVMX_XCV_CTL (0x00011800DB000030ull)
#define CVMX_XCV_DLL_CTL (0x00011800DB000010ull)
#define CVMX_XCV_ECO (0x00011800DB000200ull)
#define CVMX_XCV_INBND_STATUS (0x00011800DB000080ull)
#define CVMX_XCV_INT (0x00011800DB000040ull)
#define CVMX_XCV_RESET (0x00011800DB000000ull)
/**
* cvmx_xcv_batch_crd_ret
*/
union cvmx_xcv_batch_crd_ret {
u64 u64;
struct cvmx_xcv_batch_crd_ret_s {
u64 reserved_1_63 : 63;
u64 crd_ret : 1;
} s;
struct cvmx_xcv_batch_crd_ret_s cn73xx;
};
typedef union cvmx_xcv_batch_crd_ret cvmx_xcv_batch_crd_ret_t;
/**
* cvmx_xcv_comp_ctl
*
* This register controls programmable compensation.
*
*/
union cvmx_xcv_comp_ctl {
u64 u64;
struct cvmx_xcv_comp_ctl_s {
u64 drv_byp : 1;
u64 reserved_61_62 : 2;
u64 cmp_pctl : 5;
u64 reserved_53_55 : 3;
u64 cmp_nctl : 5;
u64 reserved_45_47 : 3;
u64 drv_pctl : 5;
u64 reserved_37_39 : 3;
u64 drv_nctl : 5;
u64 reserved_31_31 : 1;
u64 pctl_lock : 1;
u64 pctl_sat : 1;
u64 reserved_28_28 : 1;
u64 nctl_lock : 1;
u64 reserved_1_26 : 26;
u64 nctl_sat : 1;
} s;
struct cvmx_xcv_comp_ctl_s cn73xx;
};
typedef union cvmx_xcv_comp_ctl cvmx_xcv_comp_ctl_t;
/**
* cvmx_xcv_ctl
*
* This register contains the status control bits.
*
*/
union cvmx_xcv_ctl {
u64 u64;
struct cvmx_xcv_ctl_s {
u64 reserved_4_63 : 60;
u64 lpbk_ext : 1;
u64 lpbk_int : 1;
u64 speed : 2;
} s;
struct cvmx_xcv_ctl_s cn73xx;
};
typedef union cvmx_xcv_ctl cvmx_xcv_ctl_t;
/**
* cvmx_xcv_dll_ctl
*
* The RGMII timing specification requires that devices transmit clock and
* data synchronously. The specification requires external sources (namely
* the PC board trace routes) to introduce the appropriate 1.5 to 2.0 ns of
* delay.
*
* To eliminate the need for the PC board delays, the RGMII interface has optional
* on-board DLLs for both transmit and receive. For correct operation, at most one
* of the transmitter, board, or receiver involved in an RGMII link should
* introduce delay. By default/reset, the RGMII receivers delay the received clock,
* and the RGMII transmitters do not delay the transmitted clock. Whether this
* default works as-is with a given link partner depends on the behavior of the
* link partner and the PC board.
*
* These are the possible modes of RGMII receive operation:
*
* * XCV_DLL_CTL[CLKRX_BYP] = 0 (reset value) - The RGMII
* receive interface introduces clock delay using its internal DLL.
* This mode is appropriate if neither the remote
* transmitter nor the PC board delays the clock.
*
* * XCV_DLL_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The
* RGMII receive interface introduces no clock delay. This mode
* is appropriate if either the remote transmitter or the PC board
* delays the clock.
*
* These are the possible modes of RGMII transmit operation:
*
* * XCV_DLL_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) -
* The RGMII transmit interface introduces no clock
* delay. This mode is appropriate if either the remote receiver
* or the PC board delays the clock.
*
* * XCV_DLL_CTL[CLKTX_BYP] = 0 - The RGMII transmit
* interface introduces clock delay using its internal DLL.
* This mode is appropriate if neither the remote receiver
* nor the PC board delays the clock.
*/
union cvmx_xcv_dll_ctl {
u64 u64;
struct cvmx_xcv_dll_ctl_s {
u64 reserved_32_63 : 32;
u64 lock : 1;
u64 clk_set : 7;
u64 clkrx_byp : 1;
u64 clkrx_set : 7;
u64 clktx_byp : 1;
u64 clktx_set : 7;
u64 reserved_2_7 : 6;
u64 refclk_sel : 2;
} s;
struct cvmx_xcv_dll_ctl_s cn73xx;
};
typedef union cvmx_xcv_dll_ctl cvmx_xcv_dll_ctl_t;
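To tie the receive modes described above to the register layout, a board on which the PHY or the PC board already delays the RX clock could disable the internal RX DLL roughly as follows. This is a sketch, not part of the patch; csr_rd()/csr_wr() are assumed here to be the usual mach-octeon CSR accessors.
/* Illustrative only: select CLKRX_BYP = 1, CLKRX_SET = 0 (no added RX delay) */
cvmx_xcv_dll_ctl_t dll;
dll.u64 = csr_rd(CVMX_XCV_DLL_CTL);	/* read the current DLL settings */
dll.s.clkrx_byp = 1;			/* bypass the internal RX DLL */
dll.s.clkrx_set = 0;			/* no additional RX clock delay */
csr_wr(CVMX_XCV_DLL_CTL, dll.u64);	/* commit the new configuration */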
/**
* cvmx_xcv_eco
*/
union cvmx_xcv_eco {
u64 u64;
struct cvmx_xcv_eco_s {
u64 reserved_16_63 : 48;
u64 eco_rw : 16;
} s;
struct cvmx_xcv_eco_s cn73xx;
};
typedef union cvmx_xcv_eco cvmx_xcv_eco_t;
/**
* cvmx_xcv_inbnd_status
*
* This register contains RGMII in-band status.
*
*/
union cvmx_xcv_inbnd_status {
u64 u64;
struct cvmx_xcv_inbnd_status_s {
u64 reserved_4_63 : 60;
u64 duplex : 1;
u64 speed : 2;
u64 link : 1;
} s;
struct cvmx_xcv_inbnd_status_s cn73xx;
};
typedef union cvmx_xcv_inbnd_status cvmx_xcv_inbnd_status_t;
/**
* cvmx_xcv_int
*
* This register controls interrupts.
*
*/
union cvmx_xcv_int {
u64 u64;
struct cvmx_xcv_int_s {
u64 reserved_7_63 : 57;
u64 tx_ovrflw : 1;
u64 tx_undflw : 1;
u64 incomp_byte : 1;
u64 duplex : 1;
u64 reserved_2_2 : 1;
u64 speed : 1;
u64 link : 1;
} s;
struct cvmx_xcv_int_s cn73xx;
};
typedef union cvmx_xcv_int cvmx_xcv_int_t;
/**
* cvmx_xcv_reset
*
* This register controls reset.
*
*/
union cvmx_xcv_reset {
u64 u64;
struct cvmx_xcv_reset_s {
u64 enable : 1;
u64 reserved_16_62 : 47;
u64 clkrst : 1;
u64 reserved_12_14 : 3;
u64 dllrst : 1;
u64 reserved_8_10 : 3;
u64 comp : 1;
u64 reserved_4_6 : 3;
u64 tx_pkt_rst_n : 1;
u64 tx_dat_rst_n : 1;
u64 rx_pkt_rst_n : 1;
u64 rx_dat_rst_n : 1;
} s;
struct cvmx_xcv_reset_s cn73xx;
};
typedef union cvmx_xcv_reset cvmx_xcv_reset_t;
#endif

View file

@ -1,17 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
* Copyright (C) 2020-2022 Marvell International Ltd.
*/
#ifndef __OCTEON_ETH_H__
#define __OCTEON_ETH_H__
#include <phy.h>
#include <miiphy.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/octeon_fdt.h>
struct eth_device;
@ -27,33 +23,25 @@ struct octeon_eth_info {
struct phy_device *phydev; /** PHY device */
struct eth_device *ethdev; /** Eth device this priv is part of */
int mii_addr;
int phy_fdt_offset; /** Offset of PHY info in device tree */
int fdt_offset; /** Offset of Eth interface in DT */
int phy_offset; /** Offset of PHY dev in device tree */
int phy_fdt_offset; /** Offset of PHY info in device tree */
int fdt_offset; /** Offset of Eth interface in DT */
int phy_offset; /** Offset of PHY device in device tree */
enum cvmx_phy_type phy_device_type; /** Type of PHY */
/* current link status, use to reconfigure on status changes */
u64 packets_sent;
u64 packets_received;
u32 link_speed : 2;
u32 link_duplex : 1;
u32 link_status : 1;
u32 loopback : 1;
u32 enabled : 1;
u32 is_c45 : 1; /** Set if we need to use clause 45 */
u32 vitesse_sfp_config : 1; /** Need Vitesse SFP config */
u32 ti_gpio_config : 1; /** Need TI GPIO config */
u32 bgx_mac_set : 1; /** Has the BGX MAC been set already */
u64 last_bgx_mac; /** Last BGX MAC address set */
u64 gmx_base; /** Base address to access GMX CSRs */
bool mod_abs; /** True if module is absent */
/**
* User defined function to check if a SFP+ module is absent or not.
*
* @param dev Ethernet device
* @param data User supplied data
*/
int (*check_mod_abs)(struct eth_device *dev, void *data);
uint32_t link_speed : 2;
uint32_t link_duplex : 1;
uint32_t link_status : 1;
uint32_t loopback : 1;
uint32_t enabled : 1;
uint32_t is_c45 : 1; /** Set if we need to use clause 45 */
uint32_t vitesse_sfp_config : 1; /** Need Vitesse SFP config */
uint32_t ti_gpio_config : 1; /** Need TI GPIO configuration */
uint32_t bgx_mac_set : 1; /** Has the BGX MAC been set already */
u64 last_bgx_mac; /** Last BGX MAC address set */
u64 gmx_base; /** Base address to access GMX CSRs */
bool mod_abs; /** True if module is absent */
/** User supplied data for check_mod_abs */
void *mod_abs_data;
@ -71,12 +59,20 @@ struct octeon_eth_info {
* @return 0 for success, otherwise error
*/
int (*mod_abs_changed)(struct eth_device *dev, bool mod_abs);
/** SDK phy information data structure */
cvmx_phy_info_t phy_info;
struct udevice *mdio_dev;
struct mii_dev *bus;
struct phy_device *phy_dev;
#ifdef CONFIG_OCTEON_SFP
/** Information about connected SFP/SFP+/SFP28/QSFP+/QSFP28 module */
struct octeon_sfp_info sfp;
#endif
cvmx_wqe_t *work;
};
/**
@ -136,6 +132,6 @@ void octeon_eth_register_mod_abs_changed(struct eth_device *dev,
*
* NOTE: If the module state is changed then the module callback is called.
*/
void octeon_phy_port_check(struct eth_device *dev);
void octeon_phy_port_check(struct udevice *dev);
#endif /* __OCTEON_ETH_H__ */

View file

@ -13,6 +13,7 @@
#include <button.h>
#include <clk.h>
#include <dm.h>
#include <dm/of_extra.h>
#include <env.h>
#include <fdt_support.h>
#include <init.h>
@ -216,35 +217,35 @@ static int mox_get_topology(const u8 **ptopology, int *psize, int *pis_sd)
#define SW_SMI_CMD_R(d, r) (0x9800 | (((d) & 0x1f) << 5) | ((r) & 0x1f))
#define SW_SMI_CMD_W(d, r) (0x9400 | (((d) & 0x1f) << 5) | ((r) & 0x1f))
static int sw_multi_read(struct mii_dev *bus, int sw, int dev, int reg)
static int sw_multi_read(struct udevice *bus, int sw, int dev, int reg)
{
bus->write(bus, sw, 0, 0, SW_SMI_CMD_R(dev, reg));
dm_mdio_write(bus, sw, MDIO_DEVAD_NONE, 0, SW_SMI_CMD_R(dev, reg));
mdelay(5);
return bus->read(bus, sw, 0, 1);
return dm_mdio_read(bus, sw, MDIO_DEVAD_NONE, 1);
}
static void sw_multi_write(struct mii_dev *bus, int sw, int dev, int reg,
static void sw_multi_write(struct udevice *bus, int sw, int dev, int reg,
u16 val)
{
bus->write(bus, sw, 0, 1, val);
bus->write(bus, sw, 0, 0, SW_SMI_CMD_W(dev, reg));
dm_mdio_write(bus, sw, MDIO_DEVAD_NONE, 1, val);
dm_mdio_write(bus, sw, MDIO_DEVAD_NONE, 0, SW_SMI_CMD_W(dev, reg));
mdelay(5);
}
static int sw_scratch_read(struct mii_dev *bus, int sw, int reg)
static int sw_scratch_read(struct udevice *bus, int sw, int reg)
{
sw_multi_write(bus, sw, 0x1c, 0x1a, (reg & 0x7f) << 8);
return sw_multi_read(bus, sw, 0x1c, 0x1a) & 0xff;
}
static void sw_led_write(struct mii_dev *bus, int sw, int port, int reg,
static void sw_led_write(struct udevice *bus, int sw, int port, int reg,
u16 val)
{
sw_multi_write(bus, sw, port, 0x16, 0x8000 | ((reg & 7) << 12)
| (val & 0x7ff));
}
static void sw_blink_leds(struct mii_dev *bus, int peridot, int topaz)
static void sw_blink_leds(struct udevice *bus, int peridot, int topaz)
{
int i, p;
struct {
@ -275,7 +276,7 @@ static void sw_blink_leds(struct mii_dev *bus, int peridot, int topaz)
}
}
static void check_switch_address(struct mii_dev *bus, int addr)
static void check_switch_address(struct udevice *bus, int addr)
{
if (sw_scratch_read(bus, addr, 0x70) >> 3 != addr)
printf("Check of switch MDIO address failed for 0x%02x\n",
@ -374,36 +375,22 @@ static void mox_phy_modify(struct phy_device *phydev, int page, int reg,
static void mox_phy_leds_start_blinking(void)
{
struct phy_device *phydev;
struct mii_dev *bus;
const char *node_name;
int node;
ofnode phy_node;
node = fdt_path_offset(gd->fdt_blob, "ethernet0");
if (node < 0) {
printf("Cannot get eth0!\n");
return;
}
phy_node = ofnode_get_phy_node(ofnode_path("ethernet0"));
if (!ofnode_valid(phy_node))
goto err;
node_name = fdt_get_name(gd->fdt_blob, node, NULL);
if (!node_name) {
printf("Cannot get eth0 node name!\n");
return;
}
bus = miiphy_get_dev_by_name(node_name);
if (!bus) {
printf("Cannot get MDIO bus device!\n");
return;
}
phydev = phy_find_by_mask(bus, BIT(1));
if (!phydev) {
printf("Cannot get ethernet PHY!\n");
return;
}
phydev = dm_phy_find_by_ofnode(phy_node);
if (!phydev)
goto err;
mox_phy_modify(phydev, 3, 0x12, 0x700, 0x400);
mox_phy_modify(phydev, 3, 0x10, 0xff, 0xbb);
return;
err:
printf("Cannot get ethernet PHY!\n");
}
static bool read_reset_button(void)
@ -611,6 +598,26 @@ int show_board_info(void)
return 0;
}
static struct udevice *mox_mdio_bus(void)
{
struct udevice *bus;
ofnode node;
node = ofnode_by_compatible(ofnode_null(), "marvell,orion-mdio");
if (!ofnode_valid(node))
goto err;
dm_mdio_probe_devices();
if (uclass_get_device_by_ofnode(UCLASS_MDIO, node, &bus))
goto err;
return bus;
err:
printf("Cannot get MDIO bus device!\n");
return NULL;
}
int last_stage_init(void)
{
struct gpio_desc reset_gpio = {};
@ -636,16 +643,9 @@ int last_stage_init(void)
* 0x70 of Peridot (and potentially Topaz) modules
*/
if (peridot || topaz) {
struct mii_dev *bus;
const char *node_name;
int node;
struct udevice *bus = mox_mdio_bus();
node = fdt_path_offset(gd->fdt_blob, "ethernet0");
node_name = (node >= 0) ? fdt_get_name(gd->fdt_blob, node, NULL) : NULL;
bus = node_name ? miiphy_get_dev_by_name(node_name) : NULL;
if (!bus) {
printf("Cannot get MDIO bus device!\n");
} else {
if (bus) {
int i;
for (i = 0; i < peridot; ++i)

View file

@ -11,6 +11,7 @@
#include <i2c.h>
#include <init.h>
#include <mmc.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/global_data.h>
#include <asm/io.h>
@ -254,14 +255,15 @@ int board_xhci_enable(fdt_addr_t base)
return 0;
}
#ifdef CONFIG_LAST_STAGE_INIT
/* Helper function for accessing switch devices in multi-chip connection mode */
static int mii_multi_chip_mode_write(struct mii_dev *bus, int dev_smi_addr,
static int mii_multi_chip_mode_write(struct udevice *bus, int dev_smi_addr,
int smi_addr, int reg, u16 value)
{
u16 smi_cmd = 0;
if (bus->write(bus, dev_smi_addr, 0,
MVEBU_SW_SMI_DATA_REG, value) != 0) {
if (dm_mdio_write(bus, dev_smi_addr, MDIO_DEVAD_NONE,
MVEBU_SW_SMI_DATA_REG, value) != 0) {
printf("Error writing to the PHY addr=%02x reg=%02x\n",
smi_addr, reg);
return -EFAULT;
@ -272,8 +274,8 @@ static int mii_multi_chip_mode_write(struct mii_dev *bus, int dev_smi_addr,
(1 << SW_SMI_CMD_SMI_OP_OFF) |
(smi_addr << SW_SMI_CMD_DEV_ADDR_OFF) |
(reg << SW_SMI_CMD_REG_ADDR_OFF);
if (bus->write(bus, dev_smi_addr, 0,
MVEBU_SW_SMI_CMD_REG, smi_cmd) != 0) {
if (dm_mdio_write(bus, dev_smi_addr, MDIO_DEVAD_NONE,
MVEBU_SW_SMI_CMD_REG, smi_cmd) != 0) {
printf("Error writing to the PHY addr=%02x reg=%02x\n",
smi_addr, reg);
return -EFAULT;
@ -283,11 +285,22 @@ static int mii_multi_chip_mode_write(struct mii_dev *bus, int dev_smi_addr,
}
/* Bring-up board-specific network stuff */
int board_network_enable(struct mii_dev *bus)
int last_stage_init(void)
{
struct udevice *bus;
ofnode node;
if (!of_machine_is_compatible("globalscale,espressobin"))
return 0;
node = ofnode_by_compatible(ofnode_null(), "marvell,orion-mdio");
if (!ofnode_valid(node) ||
uclass_get_device_by_ofnode(UCLASS_MDIO, node, &bus) ||
device_probe(bus)) {
printf("Cannot find MDIO bus\n");
return 0;
}
/*
* FIXME: remove this code once Topaz driver gets available
* A3720 Community Board Only
@ -327,6 +340,7 @@ int board_network_enable(struct mii_dev *bus)
return 0;
}
#endif
#ifdef CONFIG_OF_BOARD_SETUP
int ft_board_setup(void *blob, struct bd_info *bd)

View file

@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2021 Stefan Roese <sr@denx.de>
* Copyright (C) 2021-2022 Stefan Roese <sr@denx.de>
*/
#include <dm.h>
#include <ram.h>
#include <asm/gpio.h>
#include <mach/octeon_ddr.h>
#include <mach/cvmx-qlm.h>
@ -84,6 +85,52 @@ int board_fix_fdt(void *fdt)
return rc;
}
int board_early_init_f(void)
{
struct gpio_desc gpio = {};
ofnode node;
/* Initial GPIO configuration */
/* GPIO 7: Vitesse reset */
node = ofnode_by_compatible(ofnode_null(), "vitesse,vsc7224");
if (ofnode_valid(node)) {
gpio_request_by_name_nodev(node, "los", 0, &gpio, GPIOD_IS_IN);
dm_gpio_free(gpio.dev, &gpio);
gpio_request_by_name_nodev(node, "reset", 0, &gpio,
GPIOD_IS_OUT);
if (dm_gpio_is_valid(&gpio)) {
/* Vitesse reset */
debug("%s: Setting GPIO 7 to 1\n", __func__);
dm_gpio_set_value(&gpio, 1);
}
dm_gpio_free(gpio.dev, &gpio);
}
/* SFP+ transmitters */
ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
gpio_request_by_name_nodev(node, "tx_disable", 0,
&gpio, GPIOD_IS_OUT);
if (dm_gpio_is_valid(&gpio)) {
debug("%s: Setting GPIO %d to 1\n", __func__,
gpio.offset);
dm_gpio_set_value(&gpio, 1);
}
dm_gpio_free(gpio.dev, &gpio);
gpio_request_by_name_nodev(node, "mod_abs", 0, &gpio,
GPIOD_IS_IN);
dm_gpio_free(gpio.dev, &gpio);
gpio_request_by_name_nodev(node, "tx_error", 0, &gpio,
GPIOD_IS_IN);
dm_gpio_free(gpio.dev, &gpio);
gpio_request_by_name_nodev(node, "rx_los", 0, &gpio,
GPIOD_IS_IN);
dm_gpio_free(gpio.dev, &gpio);
}
return 0;
}
void board_configure_qlms(void)
{
octeon_configure_qlm(4, 3000, CVMX_QLM_MODE_SATA_2X1, 0, 0, 0, 0);
@ -100,7 +147,45 @@ void board_configure_qlms(void)
int board_late_init(void)
{
struct gpio_desc gpio = {};
ofnode node;
/* Turn on SFP+ transmitters */
ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
gpio_request_by_name_nodev(node, "tx_disable", 0,
&gpio, GPIOD_IS_OUT);
if (dm_gpio_is_valid(&gpio)) {
debug("%s: Setting GPIO %d to 0\n", __func__,
gpio.offset);
dm_gpio_set_value(&gpio, 0);
}
dm_gpio_free(gpio.dev, &gpio);
}
board_configure_qlms();
return 0;
}
int last_stage_init(void)
{
struct gpio_desc gpio = {};
ofnode node;
node = ofnode_by_compatible(ofnode_null(), "vitesse,vsc7224");
if (!ofnode_valid(node)) {
printf("Vitesse SPF DT node not found!");
return 0;
}
gpio_request_by_name_nodev(node, "reset", 0, &gpio, GPIOD_IS_OUT);
if (dm_gpio_is_valid(&gpio)) {
/* Take Vitesse retimer out of reset */
debug("%s: Setting GPIO 7 to 0\n", __func__);
dm_gpio_set_value(&gpio, 0);
mdelay(50);
}
dm_gpio_free(gpio.dev, &gpio);
return 0;
}

View file

@ -62,6 +62,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y
CONFIG_SCSI=y

View file

@ -77,6 +77,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_DM_PCI_COMPAT=y
CONFIG_PCI_MVEBU=y

View file

@ -67,6 +67,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y
CONFIG_DEBUG_UART_SHIFT=2

View file

@ -62,6 +62,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y
CONFIG_SCSI=y

View file

@ -65,6 +65,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y
CONFIG_DEBUG_UART_SHIFT=2

View file

@ -65,6 +65,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y
CONFIG_DEBUG_UART_SHIFT=2

View file

@ -63,6 +63,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y
CONFIG_SCSI=y

View file

@ -47,6 +47,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_DEBUG_UART_SHIFT=2
CONFIG_SYS_NS16550=y
CONFIG_KIRKWOOD_SPI=y

View file

@ -25,6 +25,7 @@ CONFIG_DISPLAY_BOARDINFO_LATE=y
CONFIG_ARCH_EARLY_INIT_R=y
CONFIG_BOARD_EARLY_INIT_F=y
CONFIG_BOARD_LATE_INIT=y
CONFIG_LAST_STAGE_INIT=y
# CONFIG_CMD_FLASH is not set
CONFIG_CMD_FUSE=y
CONFIG_CMD_GPIO=y
@ -73,9 +74,11 @@ CONFIG_SPI_FLASH_STMICRO=y
CONFIG_SPI_FLASH_WINBOND=y
CONFIG_SPI_FLASH_MTD=y
CONFIG_PHY_MARVELL=y
CONFIG_PHY_FIXED=y
CONFIG_PHY_GIGE=y
CONFIG_E1000=y
CONFIG_MVNETA=y
CONFIG_MVMDIO=y
CONFIG_NVME_PCI=y
CONFIG_PCI=y
CONFIG_PCI_AARDVARK=y

View file

@ -26,6 +26,7 @@ CONFIG_CMD_PART=y
CONFIG_CMD_PCI=y
CONFIG_CMD_USB=y
CONFIG_CMD_DHCP=y
CONFIG_CMD_MII=y
CONFIG_CMD_PING=y
CONFIG_CMD_RTC=y
CONFIG_CMD_TIME=y
@ -36,6 +37,7 @@ CONFIG_AMIGA_PARTITION=y
CONFIG_EFI_PARTITION=y
CONFIG_PARTITION_TYPE_GUID=y
CONFIG_ENV_IS_IN_FLASH=y
CONFIG_TFTP_TSIZE=y
CONFIG_CLK=y
# CONFIG_INPUT is not set
CONFIG_MISC=y
@ -53,7 +55,12 @@ CONFIG_DM_SPI_FLASH=y
CONFIG_SPI_FLASH_ATMEL=y
CONFIG_SPI_FLASH_SPANSION=y
CONFIG_SPI_FLASH_STMICRO=y
CONFIG_PHYLIB=y
CONFIG_PHY_MARVELL=y
CONFIG_DM_MDIO=y
CONFIG_DM_ETH_PHY=y
CONFIG_E1000=y
CONFIG_NET_OCTEON=y
CONFIG_PCI=y
CONFIG_PCIE_OCTEON=y
CONFIG_DM_REGULATOR=y

View file

@ -14,13 +14,16 @@ CONFIG_ARCH_OCTEON=y
CONFIG_TARGET_OCTEON_NIC23=y
# CONFIG_MIPS_CACHE_SETUP is not set
# CONFIG_MIPS_CACHE_DISABLE is not set
CONFIG_MIPS_RELOCATION_TABLE_SIZE=0xc000
CONFIG_DEBUG_UART=y
CONFIG_AHCI=y
CONFIG_OF_BOARD_FIXUP=y
CONFIG_SYS_CONSOLE_ENV_OVERWRITE=y
# CONFIG_SYS_DEVICE_NULLDEV is not set
CONFIG_ARCH_MISC_INIT=y
CONFIG_BOARD_EARLY_INIT_F=y
CONFIG_BOARD_LATE_INIT=y
CONFIG_LAST_STAGE_INIT=y
CONFIG_HUSH_PARSER=y
# CONFIG_CMD_FLASH is not set
CONFIG_CMD_GPIO=y
@ -37,6 +40,7 @@ CONFIG_CMD_FAT=y
CONFIG_CMD_FS_GENERIC=y
CONFIG_EFI_PARTITION=y
CONFIG_ENV_IS_IN_SPI_FLASH=y
CONFIG_TFTP_TSIZE=y
CONFIG_SATA=y
CONFIG_AHCI_MVEBU=y
CONFIG_CLK=y
@ -50,7 +54,11 @@ CONFIG_DM_SPI_FLASH=y
CONFIG_SPI_FLASH_ATMEL=y
CONFIG_SPI_FLASH_SPANSION=y
CONFIG_SPI_FLASH_STMICRO=y
# CONFIG_NETDEVICES is not set
CONFIG_PHYLIB=y
CONFIG_PHYLIB_10G=y
CONFIG_DM_MDIO=y
CONFIG_DM_ETH_PHY=y
CONFIG_NET_OCTEON=y
CONFIG_PCI=y
CONFIG_DM_REGULATOR=y
CONFIG_DM_REGULATOR_FIXED=y

View file

@ -70,6 +70,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_DM_PCI_COMPAT=y
CONFIG_PCI_MVEBU=y

View file

@ -82,6 +82,7 @@ CONFIG_SPI_FLASH_MTD=y
CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MVMDIO=y
CONFIG_NVME_PCI=y
CONFIG_PCI=y
CONFIG_PCI_AARDVARK=y

View file

@ -82,9 +82,11 @@ CONFIG_SPI_FLASH_MACRONIX=y
CONFIG_SPI_FLASH_SPANSION=y
CONFIG_SPI_FLASH_MTD=y
CONFIG_PHY_MARVELL=y
CONFIG_PHY_FIXED=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MII=y
CONFIG_MVMDIO=y
CONFIG_NVME_PCI=y
CONFIG_PCI=y
CONFIG_PCI_MVEBU=y

View file

@ -73,6 +73,7 @@ CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_E1000=y
CONFIG_MVNETA=y
CONFIG_MVMDIO=y
CONFIG_PCI=y
CONFIG_PCI_AARDVARK=y
CONFIG_PHY=y

View file

@ -445,6 +445,7 @@ config MVNETA
bool "Marvell Armada XP/385/3700 network interface support"
depends on ARMADA_XP || ARMADA_38X || ARMADA_3700
select PHYLIB
select DM_MDIO
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 38X and ARMADA 3700 SoCs
@ -495,6 +496,13 @@ config MT7628_ETH
The MediaTek MT7628 ethernet interface is used on MT7628 and
MT7688 based boards.
config NET_OCTEON
bool "MIPS Octeon ethernet support"
depends on ARCH_OCTEON
help
You must select Y to enable network device support for
MIPS Octeon SoCs. If unsure, say N.
config NET_OCTEONTX
bool "OcteonTX Ethernet support"
depends on ARCH_OCTEONTX

View file

@ -64,6 +64,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o
obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_MVPP2) += mvpp2.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_NET_OCTEON) += octeon/
obj-$(CONFIG_NET_OCTEONTX) += octeontx/
obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/
obj-$(CONFIG_OCTEONTX2_CGX_INTF) += octeontx2/cgx_intf.o

View file

@ -40,11 +40,7 @@
DECLARE_GLOBAL_DATA_PTR;
#if !defined(CONFIG_PHYLIB)
# error Marvell mvneta requires PHYLIB
#endif
#define CONFIG_NR_CPUS 1
#define MVNETA_NR_CPUS 1
#define ETH_HLEN 14 /* Total octets in header */
/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
@ -192,7 +188,6 @@ DECLARE_GLOBAL_DATA_PTR;
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_FORCE_LINK_UP (BIT(0) | BIT(1))
#define MVNETA_GMAC_IB_BYPASS_AN_EN BIT(3)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
@ -282,13 +277,11 @@ struct mvneta_port {
unsigned int speed;
int init;
int phyaddr;
struct phy_device *phydev;
#if CONFIG_IS_ENABLED(DM_GPIO)
struct gpio_desc phy_reset_gpio;
struct gpio_desc sfp_tx_disable_gpio;
#endif
struct mii_dev *bus;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@ -414,15 +407,6 @@ static struct buffer_location buffer_loc;
*/
#define BD_SPACE (1 << 20)
/*
* Dummy implementation that can be overwritten by a board
* specific function
*/
__weak int board_network_enable(struct mii_dev *bus)
{
return 0;
}
/* Utility/helper methods */
/* Write helper method */
@ -576,13 +560,6 @@ static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
{
/* phy_addr is set to invalid value for fixed link */
return pp->phyaddr > PHY_MAX_ADDR;
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@ -791,7 +768,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
/* Set CPU queue access map - all CPUs have access to all RX
* queues and to all TX queues
*/
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
for (cpu = 0; cpu < MVNETA_NR_CPUS; cpu++)
mvreg_write(pp, MVNETA_CPU_MAP(cpu),
(MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
@ -834,7 +811,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
/* Enable PHY polling in hardware if not in fixed-link mode */
if (!mvneta_port_is_fixed_link(pp)) {
if (!CONFIG_IS_ENABLED(PHY_FIXED) ||
pp->phydev->phy_id != PHY_FIXED_ID) {
mvreg_write(pp, MVNETA_PHY_ADDR, pp->phydev->addr);
val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
val |= MVNETA_PHY_POLLING_ENABLE;
mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
@ -1171,38 +1151,46 @@ static void mvneta_adjust_link(struct udevice *dev)
{
struct mvneta_port *pp = dev_get_priv(dev);
struct phy_device *phydev = pp->phydev;
int status_change = 0;
bool status_change = false;
if (mvneta_port_is_fixed_link(pp)) {
debug("Using fixed link, skip link adjust\n");
return;
}
if (phydev->link &&
(pp->speed != phydev->speed || pp->duplex != phydev->duplex)) {
u32 val;
if (phydev->link) {
if ((pp->speed != phydev->speed) ||
(pp->duplex != phydev->duplex)) {
u32 val;
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
MVNETA_GMAC_CONFIG_FULL_DUPLEX |
MVNETA_GMAC_AN_SPEED_EN |
MVNETA_GMAC_AN_DUPLEX_EN);
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
MVNETA_GMAC_CONFIG_FULL_DUPLEX |
MVNETA_GMAC_AN_SPEED_EN |
MVNETA_GMAC_AN_DUPLEX_EN);
/* FIXME: For fixed-link case, these were the initial settings
* used before the code was converted to use PHY_FIXED. Some of
* these may look nonsensical (for example BYPASS_AN makes sense
* for 1000base-x and 2500base-x modes, AFAIK), and in fact this
* may be changed in the future (when support for inband AN will
* be added). Also, why is ADVERT_FC enabled if we don't enable
* inband AN at all?
*/
if (CONFIG_IS_ENABLED(PHY_FIXED) &&
pp->phydev->phy_id == PHY_FIXED_ID)
val = MVNETA_GMAC_IB_BYPASS_AN_EN |
MVNETA_GMAC_SET_FC_EN |
MVNETA_GMAC_ADVERT_FC_EN |
MVNETA_GMAC_SAMPLE_TX_CFG_EN;
if (phydev->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
if (phydev->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else if (pp->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
pp->duplex = phydev->duplex;
pp->speed = phydev->speed;
}
pp->duplex = phydev->duplex;
pp->speed = phydev->speed;
}
if (phydev->link != pp->link) {
@ -1212,7 +1200,7 @@ static void mvneta_adjust_link(struct udevice *dev)
}
pp->link = phydev->link;
status_change = 1;
status_change = true;
}
if (status_change) {
@ -1428,118 +1416,6 @@ static int mvneta_init(struct udevice *dev)
/* U-Boot only functions follow here */
/* SMI / MDIO functions */
static int smi_wait_ready(struct mvneta_port *pp)
{
u32 timeout = MVNETA_SMI_TIMEOUT;
u32 smi_reg;
/* wait till the SMI is not busy */
do {
/* read smi register */
smi_reg = mvreg_read(pp, MVNETA_SMI);
if (timeout-- == 0) {
printf("Error: SMI busy timeout\n");
return -EFAULT;
}
} while (smi_reg & MVNETA_SMI_BUSY);
return 0;
}
/*
* mvneta_mdio_read - miiphy_read callback function.
*
* Returns 16bit phy register value, or 0xffff on error
*/
static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
struct mvneta_port *pp = bus->priv;
u32 smi_reg;
u32 timeout;
/* check parameters */
if (addr > MVNETA_PHY_ADDR_MASK) {
printf("Error: Invalid PHY address %d\n", addr);
return -EFAULT;
}
if (reg > MVNETA_PHY_REG_MASK) {
printf("Err: Invalid register offset %d\n", reg);
return -EFAULT;
}
/* wait till the SMI is not busy */
if (smi_wait_ready(pp) < 0)
return -EFAULT;
/* fill the phy address and regiser offset and read opcode */
smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
| (reg << MVNETA_SMI_REG_ADDR_OFFS)
| MVNETA_SMI_OPCODE_READ;
/* write the smi register */
mvreg_write(pp, MVNETA_SMI, smi_reg);
/* wait till read value is ready */
timeout = MVNETA_SMI_TIMEOUT;
do {
/* read smi register */
smi_reg = mvreg_read(pp, MVNETA_SMI);
if (timeout-- == 0) {
printf("Err: SMI read ready timeout\n");
return -EFAULT;
}
} while (!(smi_reg & MVNETA_SMI_READ_VALID));
/* Wait for the data to update in the SMI register */
for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
;
return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
}
/*
* mvneta_mdio_write - miiphy_write callback function.
*
* Returns 0 if write succeed, -EINVAL on bad parameters
* -ETIME on timeout
*/
static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
u16 value)
{
struct mvneta_port *pp = bus->priv;
u32 smi_reg;
/* check parameters */
if (addr > MVNETA_PHY_ADDR_MASK) {
printf("Error: Invalid PHY address %d\n", addr);
return -EFAULT;
}
if (reg > MVNETA_PHY_REG_MASK) {
printf("Err: Invalid register offset %d\n", reg);
return -EFAULT;
}
/* wait till the SMI is not busy */
if (smi_wait_ready(pp) < 0)
return -EFAULT;
/* fill the phy addr and reg offset and write opcode and data */
smi_reg = value << MVNETA_SMI_DATA_OFFS;
smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
| (reg << MVNETA_SMI_REG_ADDR_OFFS);
smi_reg &= ~MVNETA_SMI_OPCODE_READ;
/* write the smi register */
mvreg_write(pp, MVNETA_SMI, smi_reg);
return 0;
}
static int mvneta_start(struct udevice *dev)
{
struct mvneta_port *pp = dev_get_priv(dev);
@ -1548,57 +1424,28 @@ static int mvneta_start(struct udevice *dev)
mvneta_port_power_up(pp, pp->phy_interface);
if (!pp->init || pp->link == 0) {
if (mvneta_port_is_fixed_link(pp)) {
u32 val;
pp->init = 1;
pp->link = 1;
mvneta_init(dev);
val = MVNETA_GMAC_FORCE_LINK_UP |
MVNETA_GMAC_IB_BYPASS_AN_EN |
MVNETA_GMAC_SET_FC_EN |
MVNETA_GMAC_ADVERT_FC_EN |
MVNETA_GMAC_SAMPLE_TX_CFG_EN;
if (pp->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
if (pp->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else if (pp->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
} else {
/* Set phy address of the port */
mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);
phydev = phy_connect(pp->bus, pp->phyaddr, dev,
pp->phy_interface);
if (!phydev) {
printf("phy_connect failed\n");
return -ENODEV;
}
pp->phydev = phydev;
phy_config(phydev);
phy_startup(phydev);
if (!phydev->link) {
printf("%s: No link.\n", phydev->dev->name);
return -1;
}
/* Full init on first call */
mvneta_init(dev);
pp->init = 1;
return 0;
phydev = dm_eth_phy_connect(dev);
if (!phydev) {
printf("dm_eth_phy_connect failed\n");
return -ENODEV;
}
}
/* Upon all following calls, this is enough */
mvneta_port_up(pp);
mvneta_port_enable(pp);
pp->phydev = phydev;
phy_config(phydev);
phy_startup(phydev);
if (!phydev->link) {
printf("%s: No link.\n", phydev->dev->name);
return -1;
}
/* Full init on first call */
mvneta_init(dev);
pp->init = 1;
} else {
/* Upon all following calls, this is enough */
mvneta_port_up(pp);
mvneta_port_enable(pp);
}
return 0;
}
@ -1692,18 +1539,11 @@ static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
static int mvneta_probe(struct udevice *dev)
{
struct eth_pdata *pdata = dev_get_plat(dev);
struct mvneta_port *pp = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
struct ofnode_phandle_args sfp_args;
#endif
void *blob = (void *)gd->fdt_blob;
int node = dev_of_offset(dev);
struct mii_dev *bus;
unsigned long addr;
void *bd_space;
int ret;
int fl_node;
/*
* Allocate buffer area for descs and rx_buffers. This is only
@ -1729,7 +1569,10 @@ static int mvneta_probe(struct udevice *dev)
buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
}
pp->base = (void __iomem *)pdata->iobase;
pp->base = dev_read_addr_ptr(dev);
pp->phy_interface = dev_read_phy_mode(dev);
if (pp->phy_interface == PHY_INTERFACE_MODE_NA)
return -EINVAL;
/* Configure MBUS address windows */
if (device_is_compatible(dev, "marvell,armada-3700-neta"))
@ -1737,42 +1580,9 @@ static int mvneta_probe(struct udevice *dev)
else
mvneta_conf_mbus_windows(pp);
/* PHY interface is already decoded in mvneta_of_to_plat() */
pp->phy_interface = pdata->phy_interface;
/* fetch 'fixed-link' property from 'neta' node */
fl_node = fdt_subnode_offset(blob, node, "fixed-link");
if (fl_node != -FDT_ERR_NOTFOUND) {
/* set phy_addr to invalid value for fixed link */
pp->phyaddr = PHY_MAX_ADDR + 1;
pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
} else {
/* Now read phyaddr from DT */
addr = fdtdec_get_int(blob, node, "phy", 0);
addr = fdt_node_offset_by_phandle(blob, addr);
pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
}
bus = mdio_alloc();
if (!bus) {
printf("Failed to allocate MDIO bus\n");
return -ENOMEM;
}
bus->read = mvneta_mdio_read;
bus->write = mvneta_mdio_write;
snprintf(bus->name, sizeof(bus->name), dev->name);
bus->priv = (void *)pp;
pp->bus = bus;
ret = mdio_register(bus);
if (ret)
return ret;
#if CONFIG_IS_ENABLED(DM_GPIO)
ret = dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args);
if (!ret && ofnode_is_enabled(sfp_args.node))
if (!dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args) &&
ofnode_is_enabled(sfp_args.node))
gpio_request_by_name_nodev(sfp_args.node, "tx-disable-gpio", 0,
&pp->sfp_tx_disable_gpio, GPIOD_IS_OUT);
@ -1789,7 +1599,7 @@ static int mvneta_probe(struct udevice *dev)
dm_gpio_set_value(&pp->sfp_tx_disable_gpio, 0);
#endif
return board_network_enable(bus);
return 0;
}
static void mvneta_stop(struct udevice *dev)
@ -1808,20 +1618,6 @@ static const struct eth_ops mvneta_ops = {
.write_hwaddr = mvneta_write_hwaddr,
};
static int mvneta_of_to_plat(struct udevice *dev)
{
struct eth_pdata *pdata = dev_get_plat(dev);
pdata->iobase = dev_read_addr(dev);
/* Get phy-mode / phy_interface from DT */
pdata->phy_interface = dev_read_phy_mode(dev);
if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
return -EINVAL;
return 0;
}
static const struct udevice_id mvneta_ids[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
@ -1833,7 +1629,6 @@ U_BOOT_DRIVER(mvneta) = {
.name = "mvneta",
.id = UCLASS_ETH,
.of_match = mvneta_ids,
.of_to_plat = mvneta_of_to_plat,
.probe = mvneta_probe,
.ops = &mvneta_ops,
.priv_auto = sizeof(struct mvneta_port),

View file

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (C) 2018-2022 Marvell International Ltd.
#
obj-$(CONFIG_NET_OCTEON) += octeon_eth.o octeon_mdio.o

File diff suppressed because it is too large

View file

@ -0,0 +1,226 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2022 Marvell International Ltd.
*/
#include <dm.h>
#include <fdt_support.h>
#include <log.h>
#include <miiphy.h>
#include <net.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-smix-defs.h>
#include <mach/cvmx-config.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-mdio.h>
#define CVMX_SMI_DRV_CTL 0x0001180000001828ull
#define DEFAULT_MDIO_SPEED 2500000 /** 2.5 MHz default speed */
/**
* cvmx_smi_drv_ctl
*
* Enables the SMI interface.
*
*/
union cvmx_smi_drv_ctl {
u64 u64;
struct cvmx_smi_drv_ctl_s {
u64 reserved_14_63 : 50;
u64 pctl : 6;
u64 reserved_6_7 : 2;
u64 nctl : 6;
} s;
};
struct octeon_mdiobus {
struct mii_dev *mii_dev;
/**
* The local bus is in the lower 8 bits, followed by the remote bus in
* the top 8 bits. Bit 16 will be set if the bus is non-local.
*/
u32 bus_id;
int node; /** Node number */
int speed; /** Bus speed, normally 2.5 MHz */
int fdt_node; /** Node in FDT */
bool local; /** true if local MDIO bus */
};
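/*
 * Illustrative decode of the bus_id encoding above (a sketch only; these
 * names are not part of the driver):
 *
 *	local_bus  = bus_id & 0xff;
 *	remote_bus = (bus_id >> 8) & 0xff;
 *	non_local  = !!(bus_id & BIT(16));
 */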
static int octeon_mdio_read(struct udevice *mdio_dev, int phy_addr,
int dev_addr, int reg_addr)
{
struct octeon_mdiobus *p = dev_get_priv(mdio_dev);
struct mii_dev *dev = p->mii_dev;
int value;
debug("%s(0x%p(%s): bus_id=%d phy_addr=%d, 0x%x, 0x%x) - ", __func__,
dev, dev->name, p->bus_id, phy_addr, dev_addr, reg_addr);
if (IS_ENABLED(CONFIG_PHYLIB_10G) && dev_addr != MDIO_DEVAD_NONE) {
debug("clause 45 mode\n");
value = cvmx_mdio_45_read(p->bus_id & 0xff, phy_addr, dev_addr,
reg_addr);
} else {
value = cvmx_mdio_read(p->bus_id & 0xff, phy_addr, reg_addr);
}
debug("Return value: 0x%x\n", value);
return value;
}
static int octeon_mdio_write(struct udevice *mdio_dev, int phy_addr,
int dev_addr, int reg_addr, u16 value)
{
struct octeon_mdiobus *p = dev_get_priv(mdio_dev);
struct mii_dev *dev = p->mii_dev;
debug("%s(0x%p(%s): bus_id=%d phy_addr=%d, 0x%x, 0x%x, 0x%x)\n",
__func__, dev, dev->name, p->bus_id, phy_addr, dev_addr, reg_addr,
value);
if (IS_ENABLED(CONFIG_PHYLIB_10G) && dev_addr != MDIO_DEVAD_NONE) {
debug("clause 45 mode\n");
return cvmx_mdio_45_write(p->bus_id & 0xff, phy_addr, dev_addr,
reg_addr, value);
}
return cvmx_mdio_write(p->bus_id & 0xff, phy_addr, reg_addr, value);
}
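/*
 * Illustrative call path (assumed, not part of this patch): callers are
 * expected to go through the MDIO uclass wrappers rather than invoking
 * these ops directly, for example:
 *
 *	int bmsr = dm_mdio_read(mdiodev, phy_addr, MDIO_DEVAD_NONE, MII_BMSR);
 *
 *	if (bmsr >= 0)
 *		dm_mdio_write(mdiodev, phy_addr, MDIO_DEVAD_NONE, MII_BMCR,
 *			      BMCR_ANENABLE | BMCR_ANRESTART);
 */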
/**
* Converts a MDIO register address to a bus number
*
* @param reg_addr MDIO base register address
*
* @return MDIO bus number or -1 if invalid address
*/
int octeon_mdio_reg_addr_to_bus(u64 reg_addr)
{
int bus_base;
int bus;
/* Adjust the bus number based on the node number */
bus_base = cvmx_csr_addr_to_node(reg_addr) * 4;
reg_addr = cvmx_csr_addr_strip_node(reg_addr);
switch (reg_addr) {
case 0x1180000001800:
case 0x1180000003800: /* 68XX/78XX address */
bus = 0;
break;
case 0x1180000001900:
case 0x1180000003880:
bus = 1;
break;
case 0x1180000003900:
bus = 2;
break;
case 0x1180000003980:
bus = 3;
break;
default:
printf("%s: Unknown register address 0x%llx\n", __func__,
reg_addr);
return -1;
}
bus += bus_base;
debug("%s: address 0x%llx is bus %d\n", __func__, reg_addr, bus);
return bus;
}
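/*
 * Example mapping derived from the switch above: on node 0,
 * 0x1180000001800/0x1180000003800 -> bus 0, 0x1180000003880 -> bus 1,
 * 0x1180000003900 -> bus 2 and 0x1180000003980 -> bus 3; on node 1 the
 * same offsets map to buses 4..7 because of the node * 4 base.
 */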
static int octeon_mdio_probe(struct udevice *dev)
{
struct octeon_mdiobus *p = dev_get_priv(dev);
union cvmx_smi_drv_ctl drv_ctl;
cvmx_smix_clk_t smi_clk;
u64 mdio_addr;
int bus;
u64 sclock;
u32 sample_dly;
u64 denom;
mdio_addr = dev_read_addr(dev);
debug("%s: Translated address: 0x%llx\n", __func__, mdio_addr);
bus = octeon_mdio_reg_addr_to_bus(mdio_addr);
p->bus_id = bus;
debug("%s: bus: %d\n", __func__, bus);
drv_ctl.u64 = csr_rd(CVMX_SMI_DRV_CTL);
drv_ctl.s.pctl = dev_read_u32_default(dev, "cavium,pctl-drive-strength",
drv_ctl.s.pctl);
drv_ctl.s.nctl = dev_read_u32_default(dev, "cavium,nctl-drive-strength",
drv_ctl.s.nctl);
debug("%s: Set MDIO PCTL drive strength to 0x%x and NCTL drive strength to 0x%x\n",
__func__, drv_ctl.s.pctl, drv_ctl.s.nctl);
csr_wr(CVMX_SMI_DRV_CTL, drv_ctl.u64);
/* Set the bus speed, default is 2.5MHz */
p->speed = dev_read_u32_default(dev, "cavium,max-speed",
DEFAULT_MDIO_SPEED);
sclock = gd->bus_clk;
smi_clk.u64 = csr_rd(CVMX_SMIX_CLK(bus & 3));
smi_clk.s.phase = sclock / (p->speed * 2);
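	/*
	 * Example with assumed numbers: for an 800 MHz bus clock and the
	 * default 2.5 MHz MDC, phase = 800000000 / (2500000 * 2) = 160
	 * core clocks per MDC half period.
	 */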
/* Allow sample delay to be specified */
sample_dly = dev_read_u32_default(dev, "cavium,sample-delay", 0);
/* Only change the sample delay if it is set, otherwise use
* the default value of 2.
*/
if (sample_dly) {
u32 sample;
denom = (sclock * 1000ULL) / sample_dly;
debug("%s: sclock: %llu, sample_dly: %u ps, denom: %llu\n",
__func__, sclock, sample_dly, denom);
sample = (sclock + denom - 1) / denom;
debug("%s: sample: %u\n", __func__, smi_clk.s.sample);
if (sample < 2) {
printf("%s: warning: cavium,sample-delay %u ps is too small in device tree for %s\n",
__func__, sample_dly, dev->name);
sample = 2;
}
if (sample > (2 * smi_clk.s.phase - 3)) {
printf("%s: warning: cavium,sample-delay %u ps is too large in device tree for %s\n",
__func__, sample_dly, dev->name);
sample = 2 * smi_clk.s.phase - 3;
}
smi_clk.s.sample = sample & 0xf;
smi_clk.s.sample_hi = (sample >> 4) & 0xf;
debug("%s(%s): sample delay: %u ps (%d clocks)\n", __func__,
dev->name, sample_dly, smi_clk.s.sample);
}
csr_wr(CVMX_SMIX_CLK(bus & 3), smi_clk.u64);
debug("mdio clock phase: %d clocks\n", smi_clk.s.phase);
csr_wr(CVMX_SMIX_CLK(bus & 3), smi_clk.u64);
debug("Enabling SMI interface %s\n", dev->name);
csr_wr(CVMX_SMIX_EN(bus & 3), 1);
/* Muxed MDIO bus support removed for now! */
return 0;
}
static const struct mdio_ops octeon_mdio_ops = {
.read = octeon_mdio_read,
.write = octeon_mdio_write,
};
static const struct udevice_id octeon_mdio_ids[] = {
{ .compatible = "cavium,octeon-3860-mdio" },
{}
};
U_BOOT_DRIVER(octeon_mdio) = {
.name = "octeon_mdio",
.id = UCLASS_MDIO,
.of_match = octeon_mdio_ids,
.probe = octeon_mdio_probe,
.ops = &octeon_mdio_ops,
.priv_auto = sizeof(struct octeon_mdiobus),
};

View file

@ -8,7 +8,7 @@
#define __OCTEON_COMMON_H__
#if defined(CONFIG_RAM_OCTEON)
#define CONFIG_SYS_INIT_SP_OFFSET 0x20100000
#define CONFIG_SYS_INIT_SP_OFFSET 0x20180000
#else
/* No DDR init -> run in L2 cache with limited resources */
#define CONFIG_SYS_INIT_SP_OFFSET 0x00180000

View file

@ -16,4 +16,6 @@
#define CONFIG_SYS_FLASH_CFI_WIDTH FLASH_CFI_8BIT
#define CONFIG_SYS_FLASH_EMPTY_INFO /* flinfo indicates empty blocks */
#define PHY_ANEG_TIMEOUT 8000 /* PHY needs a longer aneg time */
#endif /* __CONFIG_H__ */

View file

@ -188,6 +188,15 @@ int dm_mdio_write(struct udevice *mdio_dev, int addr, int devad, int reg, u16 va
*/
int dm_mdio_reset(struct udevice *mdio_dev);
/**
* dm_phy_find_by_ofnode - Find PHY device by ofnode
*
* @phynode: PHY's ofnode
*
* Return: pointer to phy_device, or NULL on error
*/
struct phy_device *dm_phy_find_by_ofnode(ofnode phynode);
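/*
 * Illustrative usage (a sketch, not from this patch): a MAC driver can
 * resolve its "phy-handle" phandle and look the PHY up on a DM MDIO bus:
 *
 *	struct ofnode_phandle_args args;
 *	struct phy_device *phydev = NULL;
 *
 *	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &args))
 *		phydev = dm_phy_find_by_ofnode(args.node);
 */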
/**
* dm_mdio_phy_connect - Wrapper over phy_connect for DM MDIO
*

View file

@ -129,6 +129,28 @@ static int dm_mdio_pre_remove(struct udevice *dev)
return 0;
}
struct phy_device *dm_phy_find_by_ofnode(ofnode phynode)
{
struct mdio_perdev_priv *pdata;
struct udevice *mdiodev;
u32 phy_addr;
if (ofnode_read_u32(phynode, "reg", &phy_addr))
return NULL;
if (uclass_get_device_by_ofnode(UCLASS_MDIO,
ofnode_get_parent(phynode),
&mdiodev))
return NULL;
if (device_probe(mdiodev))
return NULL;
pdata = dev_get_uclass_priv(mdiodev);
return phy_find_by_mask(pdata->mii_bus, BIT(phy_addr));
}
struct phy_device *dm_mdio_phy_connect(struct udevice *mdiodev, int phyaddr,
struct udevice *ethdev,
phy_interface_t interface)