/* niu.c: Neptune ethernet driver.
*
- * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
+#include <linux/list.h>
#include <linux/io.h>
#define DRV_MODULE_NAME "niu"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.6"
-#define DRV_MODULE_RELDATE "January 5, 2008"
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "Nov 14, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#ifndef readq
static u64 readq(void __iomem *reg)
{
- return (((u64)readl(reg + 0x4UL) << 32) |
- (u64)readl(reg));
+ return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}
static void writeq(u64 val, void __iomem *reg)
#define niu_unlock_parent(np, flags) \
spin_unlock_irqrestore(&np->parent->lock, flags)
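+/* Forward declaration; defined later in the file. */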
+static int serdes_init_10g_serdes(struct niu *np);
+
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
u64 bits, int limit, int delay)
{
}
/* Mode is always 10G fiber. */
-static int serdes_init_niu(struct niu *np)
+static int serdes_init_niu_10g_fiber(struct niu *np)
{
struct niu_link_config *lp = &np->link_config;
u32 tx_cfg, rx_cfg;
return 0;
}
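+/* Bring up the NIU ESR2 SERDES at 1G: half-rate lanes with an 8x
+ * PLL multiplier, then poll ESR_INT_SIGNALS for the port's
+ * ready/detect bits.
+ */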
+static int serdes_init_niu_1g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u16 pll_cfg, pll_sts;
+ int max_retry = 100;
+ u64 uninitialized_var(sig), mask, val;
+ u32 tx_cfg, rx_cfg;
+ unsigned long i;
+ int err;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
+ PLL_TX_CFG_RATE_HALF);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_RATE_HALF);
+
+ if (np->port == 0)
+ rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize PLL for 1G */
+ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_CFG_L, pll_cfg);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_1g_serdes: "
+ "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ return err;
+ }
+
+ pll_sts = PLL_CFG_ENPLL;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_STS_L, pll_sts);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_1g_serdes: "
+ "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ return err;
+ }
+
+ udelay(200);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ switch (np->port) {
+ case 0:
+ val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+ mask = val;
+ break;
+
+ case 1:
+ val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+ mask = val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
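+ /* Poll for the lane ready/signal-detect bits; each retry waits 500ms. */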
+ while (max_retry--) {
+ sig = nr64(ESR_INT_SIGNALS);
+ if ((sig & mask) == val)
+ break;
+
+ mdelay(500);
+ }
+
+ if ((sig & mask) != val) {
+ dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
+ "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
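+/* Bring up the NIU ESR2 SERDES at 10G (full-rate lanes, 10x PLL
+ * multiplier).  If the ready/detect bits never appear, retry the
+ * port at 1G and clear NIU_FLAGS_10G on success.
+ */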
+static int serdes_init_niu_10g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
+ int max_retry = 100;
+ u64 uninitialized_var(sig), mask, val;
+ unsigned long i;
+ int err;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_EQ_LP_ADAPTIVE);
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize PLL for 10G */
+ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_10g_serdes: "
+ "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ return err;
+ }
+
+ pll_sts = PLL_CFG_ENPLL;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_10g_serdes: "
+ "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ return err;
+ }
+
+ udelay(200);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ /* Check whether the SERDES is ready. */
+
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (max_retry--) {
+ sig = nr64(ESR_INT_SIGNALS);
+ if ((sig & mask) == val)
+ break;
+
+ mdelay(500);
+ }
+
+ if ((sig & mask) != val) {
+ pr_info(PFX "NIU Port %u signal bits [%08x] are not "
+ "[%08x] for 10G...trying 1G\n",
+ np->port, (int) (sig & mask), (int) val);
+
+ /* 10G failed, try initializing at 1G */
+ err = serdes_init_niu_1g_serdes(np);
+ if (!err) {
+ np->flags &= ~NIU_FLAGS_10G;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ } else {
+ dev_err(np->device, PFX "Port %u 10G/1G SERDES "
+ "Link Failed \n", np->port);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
int err;
static int esr_reset(struct niu *np)
{
- u32 reset;
+ u32 uninitialized_var(reset);
int err;
err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
}
if ((sig & mask) != val) {
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ return 0;
+ }
dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
"[%08x]\n", np->port, (int) (sig & mask), (int) val);
return -ENODEV;
}
-
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
+ np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
return 0;
}
return 0;
}
-static int bcm8704_reset(struct niu *np)
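+/* 1G bring-up for the on-chip SERDES: reset the block, program the
+ * per-port PLL/control/test registers, tune all four lanes, then
+ * verify the ready/detect bits in ESR_INT_SIGNALS.
+ */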
+static int serdes_init_1g_serdes(struct niu *np)
{
- int err, limit;
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
+ u64 ctrl_val, test_cfg_val, sig, mask, val;
+ int err;
+ u64 reset_val, val_rd;
- err = mdio_read(np, np->phy_addr,
- BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
- if (err < 0)
- return err;
- err |= BMCR_RESET;
- err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
- MII_BMCR, err);
- if (err)
- return err;
+ val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
+ ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
+ ENET_SERDES_PLL_FBDIV0;
+ switch (np->port) {
+ case 0:
+ reset_val = ENET_SERDES_RESET_0;
+ ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+ pll_cfg = ENET_SERDES_0_PLL_CFG;
+ break;
+ case 1:
+ reset_val = ENET_SERDES_RESET_1;
+ ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+ pll_cfg = ENET_SERDES_1_PLL_CFG;
+ break;
- limit = 1000;
- while (--limit >= 0) {
- err = mdio_read(np, np->phy_addr,
- BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
- if (err < 0)
- return err;
- if (!(err & BMCR_RESET))
- break;
- }
- if (limit < 0) {
- dev_err(np->device, PFX "Port %u PHY will not reset "
- "(bmcr=%04x)\n", np->port, (err & 0xffff));
- return -ENODEV;
+ default:
+ return -EINVAL;
}
- return 0;
-}
+ ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+ ENET_SERDES_CTRL_SDET_1 |
+ ENET_SERDES_CTRL_SDET_2 |
+ ENET_SERDES_CTRL_SDET_3 |
+ (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+ test_cfg_val = 0;
-/* When written, certain PHY registers need to be read back twice
- * in order for the bits to settle properly.
- */
-static int bcm8704_user_dev3_readback(struct niu *np, int reg)
-{
- int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
- if (err < 0)
- return err;
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
- if (err < 0)
- return err;
- return 0;
-}
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_0_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_1_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_2_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_3_SHIFT));
+ }
-static int bcm8704_init_user_dev3(struct niu *np)
-{
- int err;
+ nw64(ENET_SERDES_RESET, reset_val);
+ mdelay(20);
+ val_rd = nr64(ENET_SERDES_RESET);
+ val_rd &= ~reset_val;
+ nw64(pll_cfg, val);
+ nw64(ctrl_reg, ctrl_val);
+ nw64(test_cfg_reg, test_cfg_val);
+ nw64(ENET_SERDES_RESET, val_rd);
+ mdelay(2000);
- err = mdio_write(np, np->phy_addr,
- BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
- (USER_CONTROL_OPTXRST_LVL |
- USER_CONTROL_OPBIASFLT_LVL |
- USER_CONTROL_OBTMPFLT_LVL |
- USER_CONTROL_OPPRFLT_LVL |
- USER_CONTROL_OPTXFLT_LVL |
- USER_CONTROL_OPRXLOS_LVL |
- USER_CONTROL_OPRXFLT_LVL |
- USER_CONTROL_OPTXON_LVL |
- (0x3f << USER_CONTROL_RES1_SHIFT)));
- if (err)
- return err;
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ u32 rxtx_ctrl, glue0;
- err = mdio_write(np, np->phy_addr,
- BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
- (USER_PMD_TX_CTL_XFP_CLKEN |
- (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
- (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
- USER_PMD_TX_CTL_TSCK_LPWREN));
- if (err)
- return err;
-
- err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
- if (err)
- return err;
- err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
- if (err)
- return err;
-
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
- BCM8704_USER_OPT_DIGITAL_CTRL);
- if (err < 0)
- return err;
- err &= ~USER_ODIG_CTRL_GPIOS;
- err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
- err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
- BCM8704_USER_OPT_DIGITAL_CTRL, err);
- if (err)
- return err;
-
- mdelay(1000);
+ err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_read_glue0(np, i, &glue0);
+ if (err)
+ return err;
- return 0;
-}
+ rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+ rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+ (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
-static int mrvl88x2011_act_led(struct niu *np, int val)
-{
- int err;
+ glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+ ESR_GLUE_CTRL0_THCNT |
+ ESR_GLUE_CTRL0_BLTIME);
+ glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+ (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+ (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+ (BLTIME_300_CYCLES <<
+ ESR_GLUE_CTRL0_BLTIME_SHIFT));
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
- MRVL88X2011_LED_8_TO_11_CTL);
- if (err < 0)
- return err;
+ err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_write_glue0(np, i, glue0);
+ if (err)
+ return err;
+ }
- err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
- err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
- return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
- MRVL88X2011_LED_8_TO_11_CTL, err);
-}
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+ mask = val;
+ break;
-static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
-{
- int err;
+ case 1:
+ val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+ mask = val;
+ break;
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
- MRVL88X2011_LED_BLINK_CTL);
- if (err >= 0) {
- err &= ~MRVL88X2011_LED_BLKRATE_MASK;
- err |= (rate << 4);
+ default:
+ return -EINVAL;
+ }
- err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
- MRVL88X2011_LED_BLINK_CTL, err);
+ if ((sig & mask) != val) {
+ dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
+ "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ return -ENODEV;
}
- return err;
+ return 0;
}
-static int xcvr_init_10g_mrvl88x2011(struct niu *np)
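+/* 1G SERDES link state is taken directly from the PCS MII status
+ * register.
+ */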
+static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
- int err;
+ struct niu_link_config *lp = &np->link_config;
+ int link_up;
+ u64 val;
+ u16 current_speed;
+ unsigned long flags;
+ u8 current_duplex;
- /* Set LED functions */
- err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
- if (err)
- return err;
+ link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
- /* led activity */
- err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
- if (err)
- return err;
+ spin_lock_irqsave(&np->lock, flags);
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
- MRVL88X2011_GENERAL_CTL);
- if (err < 0)
- return err;
+ val = nr64_pcs(PCS_MII_STAT);
- err |= MRVL88X2011_ENA_XFPREFCLK;
+ if (val & PCS_MII_STAT_LINK_STATUS) {
+ link_up = 1;
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+ }
- err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
- MRVL88X2011_GENERAL_CTL, err);
- if (err < 0)
- return err;
+ lp->active_speed = current_speed;
+ lp->active_duplex = current_duplex;
+ spin_unlock_irqrestore(&np->lock, flags);
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
- MRVL88X2011_PMA_PMD_CTL_1);
- if (err < 0)
- return err;
+ *link_up_p = link_up;
+ return 0;
+}
- if (np->link_config.loopback_mode == LOOPBACK_MAC)
- err |= MRVL88X2011_LOOPBACK;
- else
- err &= ~MRVL88X2011_LOOPBACK;
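+/* 10G SERDES link state: up only when XPCS_STATUS(0) bit 0x1000 is
+ * set and XMAC_INTER2 bit 0x01000000 is clear.  Falls back to the
+ * 1G helper when NIU_FLAGS_10G is not set.
+ */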
+static int link_status_10g_serdes(struct niu *np, int *link_up_p)
+{
+ unsigned long flags;
+ struct niu_link_config *lp = &np->link_config;
+ int link_up = 0;
+ int link_ok = 1;
+ u64 val, val2;
+ u16 current_speed;
+ u8 current_duplex;
- err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
- MRVL88X2011_PMA_PMD_CTL_1, err);
- if (err < 0)
- return err;
+ if (!(np->flags & NIU_FLAGS_10G))
+ return link_status_1g_serdes(np, link_up_p);
- /* Enable PMD */
- return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
- MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+ spin_lock_irqsave(&np->lock, flags);
+
+ val = nr64_xpcs(XPCS_STATUS(0));
+ val2 = nr64_mac(XMAC_INTER2);
+ if (val2 & 0x01000000)
+ link_ok = 0;
+
+ if ((val & 0x1000ULL) && link_ok) {
+ link_up = 1;
+ current_speed = SPEED_10000;
+ current_duplex = DUPLEX_FULL;
+ }
+ lp->active_speed = current_speed;
+ lp->active_duplex = current_duplex;
+ spin_unlock_irqrestore(&np->lock, flags);
+ *link_up_p = link_up;
+ return 0;
}
-static int xcvr_init_10g_bcm8704(struct niu *np)
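+/* Decode link state from the standard MII registers: capabilities
+ * from BMSR/ESTATUS, advertisement from ADVERTISE/CTRL1000, then
+ * speed/duplex from the negotiated common subset (or from BMCR
+ * when autoneg is disabled).
+ */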
+static int link_status_mii(struct niu *np, int *link_up_p)
{
struct niu_link_config *lp = &np->link_config;
- u16 analog_stat0, tx_alarm_status;
int err;
+ int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
+ int supported, advertising, active_speed, active_duplex;
- err = bcm8704_reset(np);
- if (err)
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (unlikely(err < 0))
return err;
+ bmcr = err;
- err = bcm8704_init_user_dev3(np);
- if (err)
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (unlikely(err < 0))
return err;
+ bmsr = err;
- err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
- MII_BMCR);
- if (err < 0)
+ err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+ if (unlikely(err < 0))
return err;
- err &= ~BMCR_LOOPBACK;
-
- if (lp->loopback_mode == LOOPBACK_MAC)
- err |= BMCR_LOOPBACK;
+ advert = err;
- err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
- MII_BMCR, err);
- if (err)
+ err = mii_read(np, np->phy_addr, MII_LPA);
+ if (unlikely(err < 0))
return err;
+ lpa = err;
-#if 1
- err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
- MII_STAT1000);
- if (err < 0)
- return err;
- pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
- np->port, err);
+ if (likely(bmsr & BMSR_ESTATEN)) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (unlikely(err < 0))
+ return err;
+ estatus = err;
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
- if (err < 0)
- return err;
- pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
- np->port, err);
+ err = mii_read(np, np->phy_addr, MII_CTRL1000);
+ if (unlikely(err < 0))
+ return err;
+ ctrl1000 = err;
- err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
- MII_NWAYTEST);
- if (err < 0)
- return err;
- pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
- np->port, err);
-#endif
+ err = mii_read(np, np->phy_addr, MII_STAT1000);
+ if (unlikely(err < 0))
+ return err;
+ stat1000 = err;
+ } else
+ estatus = ctrl1000 = stat1000 = 0;
- /* XXX dig this out it might not be so useful XXX */
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
- BCM8704_USER_ANALOG_STATUS0);
- if (err < 0)
- return err;
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
- BCM8704_USER_ANALOG_STATUS0);
- if (err < 0)
- return err;
- analog_stat0 = err;
+ supported = 0;
+ if (bmsr & BMSR_ANEGCAPABLE)
+ supported |= SUPPORTED_Autoneg;
+ if (bmsr & BMSR_10HALF)
+ supported |= SUPPORTED_10baseT_Half;
+ if (bmsr & BMSR_10FULL)
+ supported |= SUPPORTED_10baseT_Full;
+ if (bmsr & BMSR_100HALF)
+ supported |= SUPPORTED_100baseT_Half;
+ if (bmsr & BMSR_100FULL)
+ supported |= SUPPORTED_100baseT_Full;
+ if (estatus & ESTATUS_1000_THALF)
+ supported |= SUPPORTED_1000baseT_Half;
+ if (estatus & ESTATUS_1000_TFULL)
+ supported |= SUPPORTED_1000baseT_Full;
+ lp->supported = supported;
+
+ advertising = 0;
+ if (advert & ADVERTISE_10HALF)
+ advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl1000 & ADVERTISE_1000HALF)
+ advertising |= ADVERTISED_1000baseT_Half;
+ if (ctrl1000 & ADVERTISE_1000FULL)
+ advertising |= ADVERTISED_1000baseT_Full;
+
+ if (bmcr & BMCR_ANENABLE) {
+ int neg, neg1000;
+
+ lp->active_autoneg = 1;
+ advertising |= ADVERTISED_Autoneg;
+
+ neg = advert & lpa;
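+ /* CTRL1000 advertisement bits (0x100/0x200) line up with the
+ * STAT1000 link-partner bits (0x400/0x800) after this 2-bit shift.
+ */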
+ neg1000 = (ctrl1000 << 2) & stat1000;
+
+ if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
+ active_speed = SPEED_1000;
+ else if (neg & LPA_100)
+ active_speed = SPEED_100;
+ else if (neg & (LPA_10HALF | LPA_10FULL))
+ active_speed = SPEED_10;
+ else
+ active_speed = SPEED_INVALID;
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
- BCM8704_USER_TX_ALARM_STATUS);
- if (err < 0)
- return err;
- err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
- BCM8704_USER_TX_ALARM_STATUS);
- if (err < 0)
- return err;
- tx_alarm_status = err;
+ if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
+ active_duplex = DUPLEX_FULL;
+ else if (active_speed != SPEED_INVALID)
+ active_duplex = DUPLEX_HALF;
+ else
+ active_duplex = DUPLEX_INVALID;
+ } else {
+ lp->active_autoneg = 0;
- if (analog_stat0 != 0x03fc) {
- if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
- pr_info(PFX "Port %u cable not connected "
- "or bad cable.\n", np->port);
- } else if (analog_stat0 == 0x639c) {
- pr_info(PFX "Port %u optical module is bad "
- "or missing.\n", np->port);
- }
+ if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
+ active_speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ active_speed = SPEED_100;
+ else
+ active_speed = SPEED_10;
+
+ if (bmcr & BMCR_FULLDPLX)
+ active_duplex = DUPLEX_FULL;
+ else
+ active_duplex = DUPLEX_HALF;
}
+ lp->active_advertising = advertising;
+ lp->active_speed = active_speed;
+ lp->active_duplex = active_duplex;
+ *link_up_p = !!(bmsr & BMSR_LSTATUS);
+
return 0;
}
-static int xcvr_init_10g(struct niu *np)
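+/* RGMII 1G link status: only the BMSR link bit matters; speed and
+ * duplex are reported as a fixed 1000/full.
+ */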
+static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
- int phy_id, err;
- u64 val;
+ struct niu_link_config *lp = &np->link_config;
+ u16 current_speed, bmsr;
+ unsigned long flags;
+ u8 current_duplex;
+ int err, link_up;
- val = nr64_mac(XMAC_CONFIG);
- val &= ~XMAC_CONFIG_LED_POLARITY;
- val |= XMAC_CONFIG_FORCE_LED_ON;
- nw64_mac(XMAC_CONFIG, val);
+ link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
- /* XXX shared resource, lock parent XXX */
- val = nr64(MIF_CONFIG);
- val |= MIF_CONFIG_INDIRECT_MODE;
- nw64(MIF_CONFIG, val);
+ spin_lock_irqsave(&np->lock, flags);
- phy_id = phy_decode(np->parent->port_phy, np->port);
- phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
+ err = -EINVAL;
- /* handle different phy types */
- switch (phy_id & NIU_PHY_ID_MASK) {
- case NIU_PHY_ID_MRVL88X2011:
- err = xcvr_init_10g_mrvl88x2011(np);
- break;
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ goto out;
+
+ bmsr = err;
+ if (bmsr & BMSR_LSTATUS) {
+ u16 adv, lpa, common, estat;
+
+ err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+ if (err < 0)
+ goto out;
+ adv = err;
+
+ err = mii_read(np, np->phy_addr, MII_LPA);
+ if (err < 0)
+ goto out;
+ lpa = err;
+
+ common = adv & lpa;
+
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (err < 0)
+ goto out;
+ estat = err;
+ link_up = 1;
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
- default: /* bcom 8704 */
- err = xcvr_init_10g_bcm8704(np);
- break;
}
+ lp->active_speed = current_speed;
+ lp->active_duplex = current_duplex;
+ err = 0;
- return 0;
+out:
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ *link_up_p = link_up;
+ return err;
}
-static int mii_reset(struct niu *np)
+static int link_status_1g(struct niu *np, int *link_up_p)
{
- int limit, err;
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long flags;
+ int err;
- err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
+ spin_lock_irqsave(&np->lock, flags);
+
+ err = link_status_mii(np, link_up_p);
+ lp->supported |= SUPPORTED_TP;
+ lp->active_advertising |= ADVERTISED_TP;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return err;
+}
+
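+/* Reset the BCM870x via the PHYXS BMCR and poll (up to 1000 reads)
+ * for the reset bit to clear.  An MDIO read of all ones (0xffff)
+ * means the PHY did not respond, e.g. no module is present.
+ */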
+static int bcm8704_reset(struct niu *np)
+{
+ int err, limit;
+
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+ if (err < 0 || err == 0xffff)
+ return err;
+ err |= BMCR_RESET;
+ err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ MII_BMCR, err);
if (err)
return err;
limit = 1000;
while (--limit >= 0) {
- udelay(500);
- err = mii_read(np, np->phy_addr, MII_BMCR);
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
if (err < 0)
return err;
if (!(err & BMCR_RESET))
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u MII would not reset, "
- "bmcr[%04x]\n", np->port, err);
+ dev_err(np->device, PFX "Port %u PHY will not reset "
+ "(bmcr=%04x)\n", np->port, (err & 0xffff));
return -ENODEV;
}
+ return 0;
+}
+/* When written, certain PHY registers need to be read back twice
+ * in order for the bits to settle properly.
+ */
+static int bcm8704_user_dev3_readback(struct niu *np, int reg)
+{
+ int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
+ if (err < 0)
+ return err;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
+ if (err < 0)
+ return err;
return 0;
}
-static int mii_init_common(struct niu *np)
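+/* BCM8706 variant of the user-device-3 setup: only the optical
+ * digital-control GPIO bits plus USER_ODIG_CTRL_RESV2 are
+ * programmed.
+ */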
+static int bcm8706_init_user_dev3(struct niu *np)
{
- struct niu_link_config *lp = &np->link_config;
- u16 bmcr, bmsr, adv, estat;
int err;
- err = mii_reset(np);
- if (err)
- return err;
- err = mii_read(np, np->phy_addr, MII_BMSR);
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL);
if (err < 0)
return err;
- bmsr = err;
-
- estat = 0;
- if (bmsr & BMSR_ESTATEN) {
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- return err;
- estat = err;
- }
-
- bmcr = 0;
- err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ err &= ~USER_ODIG_CTRL_GPIOS;
+ err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+ err |= USER_ODIG_CTRL_RESV2;
+ err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL, err);
if (err)
return err;
- if (lp->loopback_mode == LOOPBACK_MAC) {
- bmcr |= BMCR_LOOPBACK;
- if (lp->active_speed == SPEED_1000)
- bmcr |= BMCR_SPEED1000;
- if (lp->active_duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
- }
+ mdelay(1000);
- if (lp->loopback_mode == LOOPBACK_PHY) {
- u16 aux;
+ return 0;
+}
- aux = (BCM5464R_AUX_CTL_EXT_LB |
- BCM5464R_AUX_CTL_WRITE_1);
- err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
- if (err)
- return err;
- }
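+/* Full BCM8704 user-device-3 setup: fault/LOS signal levels, XFP
+ * TX DAC control, readbacks to let the bits settle, then the GPIO
+ * bits.
+ */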
+static int bcm8704_init_user_dev3(struct niu *np)
+{
+ int err;
- /* XXX configurable XXX */
- /* XXX for now don't advertise half-duplex or asym pause... XXX */
- adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
- if (bmsr & BMSR_10FULL)
- adv |= ADVERTISE_10FULL;
- if (bmsr & BMSR_100FULL)
- adv |= ADVERTISE_100FULL;
- err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
+ err = mdio_write(np, np->phy_addr,
+ BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
+ (USER_CONTROL_OPTXRST_LVL |
+ USER_CONTROL_OPBIASFLT_LVL |
+ USER_CONTROL_OBTMPFLT_LVL |
+ USER_CONTROL_OPPRFLT_LVL |
+ USER_CONTROL_OPTXFLT_LVL |
+ USER_CONTROL_OPRXLOS_LVL |
+ USER_CONTROL_OPRXFLT_LVL |
+ USER_CONTROL_OPTXON_LVL |
+ (0x3f << USER_CONTROL_RES1_SHIFT)));
if (err)
return err;
- if (bmsr & BMSR_ESTATEN) {
- u16 ctrl1000 = 0;
-
- if (estat & ESTATUS_1000_TFULL)
- ctrl1000 |= ADVERTISE_1000FULL;
- err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
- if (err)
- return err;
- }
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ err = mdio_write(np, np->phy_addr,
+ BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
+ (USER_PMD_TX_CTL_XFP_CLKEN |
+ (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
+ (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
+ USER_PMD_TX_CTL_TSCK_LPWREN));
+ if (err)
+ return err;
- err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
+ if (err)
+ return err;
+ err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
if (err)
return err;
- err = mii_read(np, np->phy_addr, MII_BMCR);
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL);
if (err < 0)
return err;
- err = mii_read(np, np->phy_addr, MII_BMSR);
- if (err < 0)
+ err &= ~USER_ODIG_CTRL_GPIOS;
+ err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+ err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL, err);
+ if (err)
return err;
-#if 0
- pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
- np->port, bmcr, bmsr);
-#endif
+
+ mdelay(1000);
return 0;
}
-static int xcvr_init_1g(struct niu *np)
+static int mrvl88x2011_act_led(struct niu *np, int val)
{
- u64 val;
+ int err;
- /* XXX shared resource, lock parent XXX */
- val = nr64(MIF_CONFIG);
- val &= ~MIF_CONFIG_INDIRECT_MODE;
- nw64(MIF_CONFIG, val);
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_8_TO_11_CTL);
+ if (err < 0)
+ return err;
- return mii_init_common(np);
+ err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
+ err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);
+
+ return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_8_TO_11_CTL, err);
}
-static int niu_xcvr_init(struct niu *np)
+static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
- const struct niu_phy_ops *ops = np->phy_ops;
- int err;
+ int err;
- err = 0;
- if (ops->xcvr_init)
- err = ops->xcvr_init(np);
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_BLINK_CTL);
+ if (err >= 0) {
+ err &= ~MRVL88X2011_LED_BLKRATE_MASK;
+ err |= (rate << 4);
+
+ err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_BLINK_CTL, err);
+ }
return err;
}
-static int niu_serdes_init(struct niu *np)
+static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
- const struct niu_phy_ops *ops = np->phy_ops;
- int err;
+ int err;
- err = 0;
- if (ops->serdes_init)
- err = ops->serdes_init(np);
+ /* Set LED functions */
+ err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
+ if (err)
+ return err;
- return err;
-}
+ /* led activity */
+ err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
+ if (err)
+ return err;
-static void niu_init_xif(struct niu *);
-static void niu_handle_led(struct niu *, int status);
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_GENERAL_CTL);
+ if (err < 0)
+ return err;
-static int niu_link_status_common(struct niu *np, int link_up)
-{
- struct niu_link_config *lp = &np->link_config;
- struct net_device *dev = np->dev;
- unsigned long flags;
+ err |= MRVL88X2011_ENA_XFPREFCLK;
- if (!netif_carrier_ok(dev) && link_up) {
- niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
- dev->name,
- (lp->active_speed == SPEED_10000 ?
- "10Gb/sec" :
- (lp->active_speed == SPEED_1000 ?
- "1Gb/sec" :
- (lp->active_speed == SPEED_100 ?
- "100Mbit/sec" : "10Mbit/sec"))),
- (lp->active_duplex == DUPLEX_FULL ?
- "full" : "half"));
+ err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_GENERAL_CTL, err);
+ if (err < 0)
+ return err;
- spin_lock_irqsave(&np->lock, flags);
- niu_init_xif(np);
- niu_handle_led(np, 1);
- spin_unlock_irqrestore(&np->lock, flags);
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_PMA_PMD_CTL_1);
+ if (err < 0)
+ return err;
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev) && !link_up) {
- niuwarn(LINK, "%s: Link is down\n", dev->name);
- spin_lock_irqsave(&np->lock, flags);
- niu_handle_led(np, 0);
- spin_unlock_irqrestore(&np->lock, flags);
- netif_carrier_off(dev);
- }
+ if (np->link_config.loopback_mode == LOOPBACK_MAC)
+ err |= MRVL88X2011_LOOPBACK;
+ else
+ err &= ~MRVL88X2011_LOOPBACK;
- return 0;
+ err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_PMA_PMD_CTL_1, err);
+ if (err < 0)
+ return err;
+
+ /* Enable PMD */
+ return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}
-static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
-{
- int err, link_up, pma_status, pcs_status;
- link_up = 0;
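+/* BCM870x diagnostics: dump a few status registers, then decode
+ * USER_ANALOG_STATUS0/TX_ALARM_STATUS to warn about a disconnected
+ * cable or a bad/missing optical module.
+ */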
+static int xcvr_diag_bcm870x(struct niu *np)
+{
+ u16 analog_stat0, tx_alarm_status;
+ int err = 0;
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
- MRVL88X2011_10G_PMD_STATUS_2);
+#if 1
+ err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+ MII_STAT1000);
if (err < 0)
- goto out;
+ return err;
+ pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
+ np->port, err);
- /* Check PMA/PMD Register: 1.0001.2 == 1 */
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
- MRVL88X2011_PMA_PMD_STATUS_1);
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
if (err < 0)
- goto out;
-
- pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
+ return err;
+ pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
+ np->port, err);
- /* Check PMC Register : 3.0001.2 == 1: read twice */
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
- MRVL88X2011_PMA_PMD_STATUS_1);
+ err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ MII_NWAYTEST);
if (err < 0)
- goto out;
+ return err;
+ pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
+ np->port, err);
+#endif
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
- MRVL88X2011_PMA_PMD_STATUS_1);
+ /* XXX dig this out it might not be so useful XXX */
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_ANALOG_STATUS0);
if (err < 0)
- goto out;
-
- pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
-
- /* Check XGXS Register : 4.0018.[0-3,12] */
- err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
- MRVL88X2011_10G_XGXS_LANE_STAT);
+ return err;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_ANALOG_STATUS0);
if (err < 0)
- goto out;
+ return err;
+ analog_stat0 = err;
- if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
- PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
- PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
- 0x800))
- link_up = (pma_status && pcs_status) ? 1 : 0;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_TX_ALARM_STATUS);
+ if (err < 0)
+ return err;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_TX_ALARM_STATUS);
+ if (err < 0)
+ return err;
+ tx_alarm_status = err;
- np->link_config.active_speed = SPEED_10000;
- np->link_config.active_duplex = DUPLEX_FULL;
- err = 0;
-out:
- mrvl88x2011_act_led(np, (link_up ?
- MRVL88X2011_LED_CTL_PCS_ACT :
- MRVL88X2011_LED_CTL_OFF));
+ if (analog_stat0 != 0x03fc) {
+ if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
+ pr_info(PFX "Port %u cable not connected "
+ "or bad cable.\n", np->port);
+ } else if (analog_stat0 == 0x639c) {
+ pr_info(PFX "Port %u optical module is bad "
+ "or missing.\n", np->port);
+ }
+ }
- *link_up_p = link_up;
- return err;
+ return 0;
}
-static int link_status_10g_bcom(struct niu *np, int *link_up_p)
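+/* Propagate the requested loopback mode into the PCS BMCR;
+ * BMCR_LOOPBACK is set only for MAC loopback.
+ */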
+static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
- int err, link_up;
-
- link_up = 0;
-
- err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
- BCM8704_PMD_RCV_SIGDET);
- if (err < 0)
- goto out;
- if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
- err = 0;
- goto out;
- }
+ struct niu_link_config *lp = &np->link_config;
+ int err;
err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
- BCM8704_PCS_10G_R_STATUS);
+ MII_BMCR);
if (err < 0)
- goto out;
- if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
- err = 0;
- goto out;
- }
+ return err;
- err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
- BCM8704_PHYXS_XGXS_LANE_STAT);
- if (err < 0)
- goto out;
+ err &= ~BMCR_LOOPBACK;
- if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
- PHYXS_XGXS_LANE_STAT_MAGIC |
- PHYXS_XGXS_LANE_STAT_LANE3 |
- PHYXS_XGXS_LANE_STAT_LANE2 |
- PHYXS_XGXS_LANE_STAT_LANE1 |
- PHYXS_XGXS_LANE_STAT_LANE0)) {
- err = 0;
- goto out;
- }
+ if (lp->loopback_mode == LOOPBACK_MAC)
+ err |= BMCR_LOOPBACK;
- link_up = 1;
- np->link_config.active_speed = SPEED_10000;
- np->link_config.active_duplex = DUPLEX_FULL;
- err = 0;
+ err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ MII_BMCR, err);
+ if (err)
+ return err;
-out:
- *link_up_p = link_up;
- return err;
+ return 0;
}
-static int link_status_10g(struct niu *np, int *link_up_p)
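+/* BCM8706 bring-up.  Skipped when a hot-pluggable PHY is configured
+ * but not currently present; otherwise force the link LED on, put
+ * the MIF into indirect mode, reset the PHY and run the common
+ * 870x setup and diagnostics.
+ */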
+static int xcvr_init_10g_bcm8706(struct niu *np)
{
- unsigned long flags;
- int err = -EINVAL;
+ int err = 0;
+ u64 val;
- spin_lock_irqsave(&np->lock, flags);
+ if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
+ (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
+ return err;
- if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
- int phy_id;
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_LED_POLARITY;
+ val |= XMAC_CONFIG_FORCE_LED_ON;
+ nw64_mac(XMAC_CONFIG, val);
- phy_id = phy_decode(np->parent->port_phy, np->port);
- phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
+ val = nr64(MIF_CONFIG);
+ val |= MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
- /* handle different phy types */
- switch (phy_id & NIU_PHY_ID_MASK) {
- case NIU_PHY_ID_MRVL88X2011:
- err = link_status_10g_mrvl(np, link_up_p);
- break;
+ err = bcm8704_reset(np);
+ if (err)
+ return err;
- default: /* bcom 8704 */
- err = link_status_10g_bcom(np, link_up_p);
- break;
- }
- }
+ err = xcvr_10g_set_lb_bcm870x(np);
+ if (err)
+ return err;
- spin_unlock_irqrestore(&np->lock, flags);
+ err = bcm8706_init_user_dev3(np);
+ if (err)
+ return err;
- return err;
+ err = xcvr_diag_bcm870x(np);
+ if (err)
+ return err;
+
+ return 0;
}
-static int link_status_1g(struct niu *np, int *link_up_p)
+static int xcvr_init_10g_bcm8704(struct niu *np)
{
- u16 current_speed, bmsr;
- unsigned long flags;
- u8 current_duplex;
- int err, link_up;
+ int err;
- link_up = 0;
- current_speed = SPEED_INVALID;
- current_duplex = DUPLEX_INVALID;
+ err = bcm8704_reset(np);
+ if (err)
+ return err;
- spin_lock_irqsave(&np->lock, flags);
+ err = bcm8704_init_user_dev3(np);
+ if (err)
+ return err;
- err = -EINVAL;
- if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
- goto out;
+ err = xcvr_10g_set_lb_bcm870x(np);
+ if (err)
+ return err;
- err = mii_read(np, np->phy_addr, MII_BMSR);
- if (err < 0)
- goto out;
+ err = xcvr_diag_bcm870x(np);
+ if (err)
+ return err;
- bmsr = err;
- if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa, common, estat;
+ return 0;
+}
- err = mii_read(np, np->phy_addr, MII_ADVERTISE);
- if (err < 0)
- goto out;
- adv = err;
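+/* Top-level 10G transceiver init: force the link LED on, select
+ * indirect MIF mode, then dispatch on the probed PHY ID (Marvell
+ * 88X2011, with Broadcom 8704 as the default).
+ */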
+static int xcvr_init_10g(struct niu *np)
+{
+ int phy_id, err;
+ u64 val;
- err = mii_read(np, np->phy_addr, MII_LPA);
- if (err < 0)
- goto out;
- lpa = err;
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_LED_POLARITY;
+ val |= XMAC_CONFIG_FORCE_LED_ON;
+ nw64_mac(XMAC_CONFIG, val);
- common = adv & lpa;
+ /* XXX shared resource, lock parent XXX */
+ val = nr64(MIF_CONFIG);
+ val |= MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- goto out;
- estat = err;
+ phy_id = phy_decode(np->parent->port_phy, np->port);
+ phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
- link_up = 1;
- if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
- current_speed = SPEED_1000;
- if (estat & ESTATUS_1000_TFULL)
- current_duplex = DUPLEX_FULL;
- else
- current_duplex = DUPLEX_HALF;
- } else {
- if (common & ADVERTISE_100BASE4) {
- current_speed = SPEED_100;
- current_duplex = DUPLEX_HALF;
- } else if (common & ADVERTISE_100FULL) {
- current_speed = SPEED_100;
- current_duplex = DUPLEX_FULL;
- } else if (common & ADVERTISE_100HALF) {
- current_speed = SPEED_100;
- current_duplex = DUPLEX_HALF;
- } else if (common & ADVERTISE_10FULL) {
- current_speed = SPEED_10;
- current_duplex = DUPLEX_FULL;
- } else if (common & ADVERTISE_10HALF) {
- current_speed = SPEED_10;
- current_duplex = DUPLEX_HALF;
- } else
- link_up = 0;
- }
- }
- err = 0;
+ /* handle different phy types */
+ switch (phy_id & NIU_PHY_ID_MASK) {
+ case NIU_PHY_ID_MRVL88X2011:
+ err = xcvr_init_10g_mrvl88x2011(np);
+ break;
-out:
- spin_unlock_irqrestore(&np->lock, flags);
+ default: /* bcom 8704 */
+ err = xcvr_init_10g_bcm8704(np);
+ break;
+ }
- *link_up_p = link_up;
- return err;
+ return 0;
}
-static int niu_link_status(struct niu *np, int *link_up_p)
+static int mii_reset(struct niu *np)
{
- const struct niu_phy_ops *ops = np->phy_ops;
- int err;
+ int limit, err;
- err = 0;
- if (ops->link_status)
- err = ops->link_status(np, link_up_p);
+ err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
+ if (err)
+ return err;
- return err;
+ limit = 1000;
+ while (--limit >= 0) {
+ udelay(500);
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (err < 0)
+ return err;
+ if (!(err & BMCR_RESET))
+ break;
+ }
+ if (limit < 0) {
+ dev_err(np->device, PFX "Port %u MII would not reset, "
+ "bmcr[%04x]\n", np->port, err);
+ return -ENODEV;
+ }
+
+ return 0;
}
-static void niu_timer(unsigned long __opaque)
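+/* RGMII 1G transceiver init: direct MIF mode, PHY reset, advertise
+ * 1000baseT-Full when the PHY supports it, then force 1000/full in
+ * BMCR.
+ */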
+static int xcvr_init_1g_rgmii(struct niu *np)
{
- struct niu *np = (struct niu *) __opaque;
- unsigned long off;
- int err, link_up;
-
- err = niu_link_status(np, &link_up);
- if (!err)
- niu_link_status_common(np, link_up);
-
- if (netif_carrier_ok(np->dev))
- off = 5 * HZ;
- else
- off = 1 * HZ;
- np->timer.expires = jiffies + off;
+ int err;
+ u64 val;
+ u16 bmcr, bmsr, estat;
- add_timer(&np->timer);
-}
+ val = nr64(MIF_CONFIG);
+ val &= ~MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
-static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
- .serdes_init = serdes_init_niu,
- .xcvr_init = xcvr_init_10g,
- .link_status = link_status_10g,
-};
+ err = mii_reset(np);
+ if (err)
+ return err;
-static const struct niu_phy_ops phy_ops_10g_fiber = {
- .serdes_init = serdes_init_10g,
- .xcvr_init = xcvr_init_10g,
- .link_status = link_status_10g,
-};
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+ bmsr = err;
-static const struct niu_phy_ops phy_ops_10g_copper = {
- .serdes_init = serdes_init_10g,
- .link_status = link_status_10g, /* XXX */
-};
+ estat = 0;
+ if (bmsr & BMSR_ESTATEN) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (err < 0)
+ return err;
+ estat = err;
+ }
-static const struct niu_phy_ops phy_ops_1g_fiber = {
- .serdes_init = serdes_init_1g,
- .xcvr_init = xcvr_init_1g,
- .link_status = link_status_1g,
-};
+ bmcr = 0;
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
-static const struct niu_phy_ops phy_ops_1g_copper = {
- .xcvr_init = xcvr_init_1g,
- .link_status = link_status_1g,
-};
+ if (bmsr & BMSR_ESTATEN) {
+ u16 ctrl1000 = 0;
-struct niu_phy_template {
- const struct niu_phy_ops *ops;
- u32 phy_addr_base;
-};
+ if (estat & ESTATUS_1000_TFULL)
+ ctrl1000 |= ADVERTISE_1000FULL;
+ err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
+ if (err)
+ return err;
+ }
-static const struct niu_phy_template phy_template_niu = {
- .ops = &phy_ops_10g_fiber_niu,
- .phy_addr_base = 16,
-};
+ bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
-static const struct niu_phy_template phy_template_10g_fiber = {
- .ops = &phy_ops_10g_fiber,
- .phy_addr_base = 8,
-};
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
-static const struct niu_phy_template phy_template_10g_copper = {
- .ops = &phy_ops_10g_copper,
- .phy_addr_base = 10,
-};
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (err < 0)
+ return err;
+ bmcr = mii_read(np, np->phy_addr, MII_BMCR);
-static const struct niu_phy_template phy_template_1g_fiber = {
- .ops = &phy_ops_1g_fiber,
- .phy_addr_base = 0,
-};
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
-static const struct niu_phy_template phy_template_1g_copper = {
- .ops = &phy_ops_1g_copper,
- .phy_addr_base = 0,
-};
+ return 0;
+}
-static int niu_determine_phy_disposition(struct niu *np)
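+/* Common MII setup: reset the PHY, honor MAC/PHY loopback requests,
+ * then either autonegotiate the subset of lp->advertising that the
+ * PHY supports, or force lp->speed/lp->duplex after validating them
+ * against BMSR/ESTATUS.
+ */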
+static int mii_init_common(struct niu *np)
{
- struct niu_parent *parent = np->parent;
- u8 plat_type = parent->plat_type;
- const struct niu_phy_template *tp;
- u32 phy_addr_off = 0;
-
- if (plat_type == PLAT_TYPE_NIU) {
- tp = &phy_template_niu;
- phy_addr_off += np->port;
- } else {
- switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) {
- case 0:
- /* 1G copper */
- tp = &phy_template_1g_copper;
- if (plat_type == PLAT_TYPE_VF_P0)
- phy_addr_off = 10;
- else if (plat_type == PLAT_TYPE_VF_P1)
- phy_addr_off = 26;
-
- phy_addr_off += (np->port ^ 0x3);
- break;
-
- case NIU_FLAGS_10G:
- /* 10G copper */
- tp = &phy_template_1g_copper;
- break;
+ struct niu_link_config *lp = &np->link_config;
+ u16 bmcr, bmsr, adv, estat;
+ int err;
- case NIU_FLAGS_FIBER:
- /* 1G fiber */
- tp = &phy_template_1g_fiber;
- break;
+ err = mii_reset(np);
+ if (err)
+ return err;
- case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
- /* 10G fiber */
- tp = &phy_template_10g_fiber;
- if (plat_type == PLAT_TYPE_VF_P0 ||
- plat_type == PLAT_TYPE_VF_P1)
- phy_addr_off = 8;
- phy_addr_off += np->port;
- break;
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+ bmsr = err;
- default:
- return -EINVAL;
- }
+ estat = 0;
+ if (bmsr & BMSR_ESTATEN) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (err < 0)
+ return err;
+ estat = err;
}
- np->phy_ops = tp->ops;
- np->phy_addr = tp->phy_addr_base + phy_addr_off;
+ bmcr = 0;
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
- return 0;
-}
+ if (lp->loopback_mode == LOOPBACK_MAC) {
+ bmcr |= BMCR_LOOPBACK;
+ if (lp->active_speed == SPEED_1000)
+ bmcr |= BMCR_SPEED1000;
+ if (lp->active_duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+ }
-static int niu_init_link(struct niu *np)
-{
- struct niu_parent *parent = np->parent;
- int err, ignore;
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 aux;
- if (parent->plat_type == PLAT_TYPE_NIU) {
- err = niu_xcvr_init(np);
+ aux = (BCM5464R_AUX_CTL_EXT_LB |
+ BCM5464R_AUX_CTL_WRITE_1);
+ err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
if (err)
return err;
- msleep(200);
}
- err = niu_serdes_init(np);
- if (err)
- return err;
- msleep(200);
- err = niu_xcvr_init(np);
- if (!err)
- niu_link_status(np, &ignore);
- return 0;
-}
-static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
-{
- u16 reg0 = addr[4] << 8 | addr[5];
- u16 reg1 = addr[2] << 8 | addr[3];
- u16 reg2 = addr[0] << 8 | addr[1];
+ if (lp->autoneg) {
+ u16 ctrl1000;
+
+ adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
+ if ((bmsr & BMSR_10HALF) &&
+ (lp->advertising & ADVERTISED_10baseT_Half))
+ adv |= ADVERTISE_10HALF;
+ if ((bmsr & BMSR_10FULL) &&
+ (lp->advertising & ADVERTISED_10baseT_Full))
+ adv |= ADVERTISE_10FULL;
+ if ((bmsr & BMSR_100HALF) &&
+ (lp->advertising & ADVERTISED_100baseT_Half))
+ adv |= ADVERTISE_100HALF;
+ if ((bmsr & BMSR_100FULL) &&
+ (lp->advertising & ADVERTISED_100baseT_Full))
+ adv |= ADVERTISE_100FULL;
+ err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
+ if (err)
+ return err;
- if (np->flags & NIU_FLAGS_XMAC) {
- nw64_mac(XMAC_ADDR0, reg0);
- nw64_mac(XMAC_ADDR1, reg1);
- nw64_mac(XMAC_ADDR2, reg2);
+ if (likely(bmsr & BMSR_ESTATEN)) {
+ ctrl1000 = 0;
+ if ((estat & ESTATUS_1000_THALF) &&
+ (lp->advertising & ADVERTISED_1000baseT_Half))
+ ctrl1000 |= ADVERTISE_1000HALF;
+ if ((estat & ESTATUS_1000_TFULL) &&
+ (lp->advertising & ADVERTISED_1000baseT_Full))
+ ctrl1000 |= ADVERTISE_1000FULL;
+ err = mii_write(np, np->phy_addr,
+ MII_CTRL1000, ctrl1000);
+ if (err)
+ return err;
+ }
+
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
} else {
- nw64_mac(BMAC_ADDR0, reg0);
- nw64_mac(BMAC_ADDR1, reg1);
- nw64_mac(BMAC_ADDR2, reg2);
+ /* !lp->autoneg */
+ int fulldpx;
+
+ if (lp->duplex == DUPLEX_FULL) {
+ bmcr |= BMCR_FULLDPLX;
+ fulldpx = 1;
+ } else if (lp->duplex == DUPLEX_HALF)
+ fulldpx = 0;
+ else
+ return -EINVAL;
+
+ if (lp->speed == SPEED_1000) {
+ /* if X-full requested while not supported, or
+ X-half requested while not supported... */
+ if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
+ (!fulldpx && !(estat & ESTATUS_1000_THALF)))
+ return -EINVAL;
+ bmcr |= BMCR_SPEED1000;
+ } else if (lp->speed == SPEED_100) {
+ if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
+ (!fulldpx && !(bmsr & BMSR_100HALF)))
+ return -EINVAL;
+ bmcr |= BMCR_SPEED100;
+ } else if (lp->speed == SPEED_10) {
+ if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
+ (!fulldpx && !(bmsr & BMSR_10HALF)))
+ return -EINVAL;
+ } else
+ return -EINVAL;
}
-}
-static int niu_num_alt_addr(struct niu *np)
-{
- if (np->flags & NIU_FLAGS_XMAC)
- return XMAC_NUM_ALT_ADDR;
- else
- return BMAC_NUM_ALT_ADDR;
-}
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
-static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
-{
- u16 reg0 = addr[4] << 8 | addr[5];
- u16 reg1 = addr[2] << 8 | addr[3];
- u16 reg2 = addr[0] << 8 | addr[1];
+#if 0
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (err < 0)
+ return err;
+ bmcr = err;
- if (index >= niu_num_alt_addr(np))
- return -EINVAL;
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+ bmsr = err;
- if (np->flags & NIU_FLAGS_XMAC) {
- nw64_mac(XMAC_ALT_ADDR0(index), reg0);
- nw64_mac(XMAC_ALT_ADDR1(index), reg1);
- nw64_mac(XMAC_ALT_ADDR2(index), reg2);
- } else {
- nw64_mac(BMAC_ALT_ADDR0(index), reg0);
- nw64_mac(BMAC_ALT_ADDR1(index), reg1);
- nw64_mac(BMAC_ALT_ADDR2(index), reg2);
- }
+ pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
+ np->port, bmcr, bmsr);
+#endif
return 0;
}
-static int niu_enable_alt_mac(struct niu *np, int index, int on)
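+/* 1G copper init: put the MIF into direct mode, then run the common
+ * MII setup.
+ */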
+static int xcvr_init_1g(struct niu *np)
{
- unsigned long reg;
- u64 val, mask;
-
- if (index >= niu_num_alt_addr(np))
- return -EINVAL;
-
- if (np->flags & NIU_FLAGS_XMAC)
- reg = XMAC_ADDR_CMPEN;
- else
- reg = BMAC_ADDR_CMPEN;
-
- mask = 1 << index;
+ u64 val;
- val = nr64_mac(reg);
- if (on)
- val |= mask;
- else
- val &= ~mask;
- nw64_mac(reg, val);
+ /* XXX shared resource, lock parent XXX */
+ val = nr64(MIF_CONFIG);
+ val &= ~MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
- return 0;
+ return mii_init_common(np);
}
-static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
- int num, int mac_pref)
+static int niu_xcvr_init(struct niu *np)
{
- u64 val = nr64_mac(reg);
- val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
- val |= num;
- if (mac_pref)
- val |= HOST_INFO_MPR;
- nw64_mac(reg, val);
-}
+ const struct niu_phy_ops *ops = np->phy_ops;
+ int err;
-static int __set_rdc_table_num(struct niu *np,
- int xmac_index, int bmac_index,
- int rdc_table_num, int mac_pref)
-{
- unsigned long reg;
+ err = 0;
+ if (ops->xcvr_init)
+ err = ops->xcvr_init(np);
- if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
- return -EINVAL;
- if (np->flags & NIU_FLAGS_XMAC)
- reg = XMAC_HOST_INFO(xmac_index);
- else
- reg = BMAC_HOST_INFO(bmac_index);
- __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
- return 0;
+ return err;
}
-static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
- int mac_pref)
+static int niu_serdes_init(struct niu *np)
{
- return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
-}
+ const struct niu_phy_ops *ops = np->phy_ops;
+ int err;
-static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
- int mac_pref)
-{
- return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
-}
+ err = 0;
+ if (ops->serdes_init)
+ err = ops->serdes_init(np);
-static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
- int table_num, int mac_pref)
-{
- if (idx >= niu_num_alt_addr(np))
- return -EINVAL;
- return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
+ return err;
}
-static u64 vlan_entry_set_parity(u64 reg_val)
+static void niu_init_xif(struct niu *);
+static void niu_handle_led(struct niu *, int status);
+
+static int niu_link_status_common(struct niu *np, int link_up)
{
- u64 port01_mask;
- u64 port23_mask;
+ struct niu_link_config *lp = &np->link_config;
+ struct net_device *dev = np->dev;
+ unsigned long flags;
- port01_mask = 0x00ff;
- port23_mask = 0xff00;
+ if (!netif_carrier_ok(dev) && link_up) {
+ niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
+ dev->name,
+ (lp->active_speed == SPEED_10000 ?
+ "10Gb/sec" :
+ (lp->active_speed == SPEED_1000 ?
+ "1Gb/sec" :
+ (lp->active_speed == SPEED_100 ?
+ "100Mbit/sec" : "10Mbit/sec"))),
+ (lp->active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
- if (hweight64(reg_val & port01_mask) & 1)
- reg_val |= ENET_VLAN_TBL_PARITY0;
- else
- reg_val &= ~ENET_VLAN_TBL_PARITY0;
+ spin_lock_irqsave(&np->lock, flags);
+ niu_init_xif(np);
+ niu_handle_led(np, 1);
+ spin_unlock_irqrestore(&np->lock, flags);
- if (hweight64(reg_val & port23_mask) & 1)
- reg_val |= ENET_VLAN_TBL_PARITY1;
- else
- reg_val &= ~ENET_VLAN_TBL_PARITY1;
+ netif_carrier_on(dev);
+ } else if (netif_carrier_ok(dev) && !link_up) {
+ niuwarn(LINK, "%s: Link is down\n", dev->name);
+ spin_lock_irqsave(&np->lock, flags);
+ niu_handle_led(np, 0);
+ spin_unlock_irqrestore(&np->lock, flags);
+ netif_carrier_off(dev);
+ }
- return reg_val;
+ return 0;
}
-static void vlan_tbl_write(struct niu *np, unsigned long index,
- int port, int vpr, int rdc_table)
+static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
- u64 reg_val = nr64(ENET_VLAN_TBL(index));
+ int err, link_up, pma_status, pcs_status;
- reg_val &= ~((ENET_VLAN_TBL_VPR |
- ENET_VLAN_TBL_VLANRDCTBLN) <<
- ENET_VLAN_TBL_SHIFT(port));
- if (vpr)
- reg_val |= (ENET_VLAN_TBL_VPR <<
- ENET_VLAN_TBL_SHIFT(port));
- reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
+ link_up = 0;
- reg_val = vlan_entry_set_parity(reg_val);
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_10G_PMD_STATUS_2);
+ if (err < 0)
+ goto out;
- nw64(ENET_VLAN_TBL(index), reg_val);
-}
+ /* Check PMA/PMD Register: 1.0001.2 == 1 */
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_PMA_PMD_STATUS_1);
+ if (err < 0)
+ goto out;
-static void vlan_tbl_clear(struct niu *np)
-{
- int i;
+ pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
- for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
- nw64(ENET_VLAN_TBL(i), 0);
-}
+ /* Check PMC Register : 3.0001.2 == 1: read twice */
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_PMA_PMD_STATUS_1);
+ if (err < 0)
+ goto out;
-static int tcam_wait_bit(struct niu *np, u64 bit)
-{
- int limit = 1000;
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_PMA_PMD_STATUS_1);
+ if (err < 0)
+ goto out;
- while (--limit > 0) {
- if (nr64(TCAM_CTL) & bit)
- break;
- udelay(1);
- }
- if (limit < 0)
- return -ENODEV;
+ pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
- return 0;
-}
+ /* Check XGXS Register : 4.0018.[0-3,12] */
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
+ MRVL88X2011_10G_XGXS_LANE_STAT);
+ if (err < 0)
+ goto out;
-static int tcam_flush(struct niu *np, int index)
-{
- nw64(TCAM_KEY_0, 0x00);
- nw64(TCAM_KEY_MASK_0, 0xff);
- nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
+ if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
+ PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
+ PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
+ 0x800))
+ link_up = (pma_status && pcs_status) ? 1 : 0;
- return tcam_wait_bit(np, TCAM_CTL_STAT);
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ err = 0;
+out:
+ mrvl88x2011_act_led(np, (link_up ?
+ MRVL88X2011_LED_CTL_PCS_ACT :
+ MRVL88X2011_LED_CTL_OFF));
+
+ *link_up_p = link_up;
+ return err;
}
-#if 0
-static int tcam_read(struct niu *np, int index,
- u64 *key, u64 *mask)
+static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
- int err;
+ int err, link_up;
+ link_up = 0;
- nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
- err = tcam_wait_bit(np, TCAM_CTL_STAT);
- if (!err) {
- key[0] = nr64(TCAM_KEY_0);
- key[1] = nr64(TCAM_KEY_1);
- key[2] = nr64(TCAM_KEY_2);
- key[3] = nr64(TCAM_KEY_3);
- mask[0] = nr64(TCAM_KEY_MASK_0);
- mask[1] = nr64(TCAM_KEY_MASK_1);
- mask[2] = nr64(TCAM_KEY_MASK_2);
- mask[3] = nr64(TCAM_KEY_MASK_3);
+ err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+ BCM8704_PMD_RCV_SIGDET);
+ if (err < 0 || err == 0xffff)
+ goto out;
+ if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
+ err = 0;
+ goto out;
}
- return err;
-}
-#endif
-static int tcam_write(struct niu *np, int index,
- u64 *key, u64 *mask)
-{
- nw64(TCAM_KEY_0, key[0]);
- nw64(TCAM_KEY_1, key[1]);
- nw64(TCAM_KEY_2, key[2]);
- nw64(TCAM_KEY_3, key[3]);
- nw64(TCAM_KEY_MASK_0, mask[0]);
- nw64(TCAM_KEY_MASK_1, mask[1]);
- nw64(TCAM_KEY_MASK_2, mask[2]);
- nw64(TCAM_KEY_MASK_3, mask[3]);
- nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
+ err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ BCM8704_PCS_10G_R_STATUS);
+ if (err < 0)
+ goto out;
- return tcam_wait_bit(np, TCAM_CTL_STAT);
-}
+ if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
+ err = 0;
+ goto out;
+ }
-#if 0
-static int tcam_assoc_read(struct niu *np, int index, u64 *data)
-{
- int err;
+ err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ BCM8704_PHYXS_XGXS_LANE_STAT);
+ if (err < 0)
+ goto out;
+ if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
+ PHYXS_XGXS_LANE_STAT_MAGIC |
+ PHYXS_XGXS_LANE_STAT_PATTEST |
+ PHYXS_XGXS_LANE_STAT_LANE3 |
+ PHYXS_XGXS_LANE_STAT_LANE2 |
+ PHYXS_XGXS_LANE_STAT_LANE1 |
+ PHYXS_XGXS_LANE_STAT_LANE0)) {
+ err = 0;
+ np->link_config.active_speed = SPEED_INVALID;
+ np->link_config.active_duplex = DUPLEX_INVALID;
+ goto out;
+ }
- nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
- err = tcam_wait_bit(np, TCAM_CTL_STAT);
- if (!err)
- *data = nr64(TCAM_KEY_1);
+ link_up = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ err = 0;
+out:
+ *link_up_p = link_up;
return err;
}
-#endif
-static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
+static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
- nw64(TCAM_KEY_1, assoc_data);
- nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
+ int err, link_up;
- return tcam_wait_bit(np, TCAM_CTL_STAT);
-}
+ link_up = 0;
-static void tcam_enable(struct niu *np, int on)
-{
- u64 val = nr64(FFLP_CFG_1);
+ err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+ BCM8704_PMD_RCV_SIGDET);
+ if (err < 0)
+ goto out;
+ if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
+ err = 0;
+ goto out;
+ }
- if (on)
- val &= ~FFLP_CFG_1_TCAM_DIS;
- else
- val |= FFLP_CFG_1_TCAM_DIS;
- nw64(FFLP_CFG_1, val);
-}
+ err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ BCM8704_PCS_10G_R_STATUS);
+ if (err < 0)
+ goto out;
+ if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
+ err = 0;
+ goto out;
+ }
-static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
-{
- u64 val = nr64(FFLP_CFG_1);
+ err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ BCM8704_PHYXS_XGXS_LANE_STAT);
+ if (err < 0)
+ goto out;
- val &= ~(FFLP_CFG_1_FFLPINITDONE |
- FFLP_CFG_1_CAMLAT |
- FFLP_CFG_1_CAMRATIO);
- val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
- val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
- nw64(FFLP_CFG_1, val);
+ if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
+ PHYXS_XGXS_LANE_STAT_MAGIC |
+ PHYXS_XGXS_LANE_STAT_LANE3 |
+ PHYXS_XGXS_LANE_STAT_LANE2 |
+ PHYXS_XGXS_LANE_STAT_LANE1 |
+ PHYXS_XGXS_LANE_STAT_LANE0)) {
+ err = 0;
+ goto out;
+ }
- val = nr64(FFLP_CFG_1);
- val |= FFLP_CFG_1_FFLPINITDONE;
- nw64(FFLP_CFG_1, val);
+ link_up = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ err = 0;
+
+out:
+ *link_up_p = link_up;
+ return err;
}
-static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
- int on)
+static int link_status_10g(struct niu *np, int *link_up_p)
{
- unsigned long reg;
- u64 val;
+ unsigned long flags;
+ int err = -EINVAL;
- if (class < CLASS_CODE_ETHERTYPE1 ||
- class > CLASS_CODE_ETHERTYPE2)
- return -EINVAL;
+ spin_lock_irqsave(&np->lock, flags);
- reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
- val = nr64(reg);
- if (on)
- val |= L2_CLS_VLD;
- else
- val &= ~L2_CLS_VLD;
- nw64(reg, val);
+ if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
+ int phy_id;
- return 0;
-}
+ phy_id = phy_decode(np->parent->port_phy, np->port);
+ phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
-#if 0
-static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
- u64 ether_type)
-{
- unsigned long reg;
- u64 val;
+ /* handle different phy types */
+ switch (phy_id & NIU_PHY_ID_MASK) {
+ case NIU_PHY_ID_MRVL88X2011:
+ err = link_status_10g_mrvl(np, link_up_p);
+ break;
- if (class < CLASS_CODE_ETHERTYPE1 ||
- class > CLASS_CODE_ETHERTYPE2 ||
- (ether_type & ~(u64)0xffff) != 0)
- return -EINVAL;
+ default: /* bcom 8704 */
+ err = link_status_10g_bcom(np, link_up_p);
+ break;
+ }
+ }
- reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
- val = nr64(reg);
- val &= ~L2_CLS_ETYPE;
- val |= (ether_type << L2_CLS_ETYPE_SHIFT);
- nw64(reg, val);
+ spin_unlock_irqrestore(&np->lock, flags);
- return 0;
+ return err;
}
-#endif
-static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
- int on)
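+/* A 10G PHY is considered present when all of the port's serdes
+ * ready/detect bits are asserted in ESR_INT_SIGNALS.
+ */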
+static int niu_10g_phy_present(struct niu *np)
{
- unsigned long reg;
- u64 val;
+ u64 sig, mask, val;
- if (class < CLASS_CODE_USER_PROG1 ||
- class > CLASS_CODE_USER_PROG4)
- return -EINVAL;
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
- reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
- val = nr64(reg);
- if (on)
- val |= L3_CLS_VALID;
- else
- val &= ~L3_CLS_VALID;
- nw64(reg, val);
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
- return 0;
+ default:
+ return 0;
+ }
+
+ if ((sig & mask) != val)
+ return 0;
+ return 1;
}
-#if 0
-static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
- int ipv6, u64 protocol_id,
- u64 tos_mask, u64 tos_val)
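+/* Hotplug link poll: track PHY presence transitions, re-run the
+ * transceiver init when a module appears, and treat a dead MDIO bus
+ * (reads of 0xffff) as a back-to-back XAUI connection rather than a
+ * removal.
+ */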
+static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
- unsigned long reg;
- u64 val;
+ unsigned long flags;
+ int err = 0;
+ int phy_present;
+ int phy_present_prev;
- if (class < CLASS_CODE_USER_PROG1 ||
- class > CLASS_CODE_USER_PROG4 ||
- (protocol_id & ~(u64)0xff) != 0 ||
- (tos_mask & ~(u64)0xff) != 0 ||
- (tos_val & ~(u64)0xff) != 0)
- return -EINVAL;
+ spin_lock_irqsave(&np->lock, flags);
- reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
- val = nr64(reg);
- val &= ~(L3_CLS_IPVER | L3_CLS_PID |
- L3_CLS_TOSMASK | L3_CLS_TOS);
- if (ipv6)
- val |= L3_CLS_IPVER;
- val |= (protocol_id << L3_CLS_PID_SHIFT);
- val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
- val |= (tos_val << L3_CLS_TOS_SHIFT);
- nw64(reg, val);
+ if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
+ phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
+ 1 : 0;
+ phy_present = niu_10g_phy_present(np);
+ if (phy_present != phy_present_prev) {
+ /* state change */
+ if (phy_present) {
+ /* A NEM was just plugged in */
+ np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ if (np->phy_ops->xcvr_init)
+ err = np->phy_ops->xcvr_init(np);
+ if (err) {
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+ if (err == 0xffff) {
+ /* No mdio, back-to-back XAUI */
+ goto out;
+ }
+ /* debounce */
+ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ }
+ } else {
+ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ *link_up_p = 0;
+ niuwarn(LINK, "%s: Hotplug PHY Removed\n",
+ np->dev->name);
+ }
+ }
+out:
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
+ err = link_status_10g_bcm8706(np, link_up_p);
+ if (err == 0xffff) {
+ /* No mdio, back-to-back XAUI: it is C10NEM */
+ *link_up_p = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
return 0;
}
-#endif
-static int tcam_early_init(struct niu *np)
+static int niu_link_status(struct niu *np, int *link_up_p)
{
- unsigned long i;
+ const struct niu_phy_ops *ops = np->phy_ops;
int err;
- tcam_enable(np, 0);
- tcam_set_lat_and_ratio(np,
- DEFAULT_TCAM_LATENCY,
- DEFAULT_TCAM_ACCESS_RATIO);
- for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
- err = tcam_user_eth_class_enable(np, i, 0);
- if (err)
- return err;
- }
- for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
- err = tcam_user_ip_class_enable(np, i, 0);
- if (err)
- return err;
- }
+ err = 0;
+ if (ops->link_status)
+ err = ops->link_status(np, link_up_p);
- return 0;
+ return err;
}
-static int tcam_flush_all(struct niu *np)
+static void niu_timer(unsigned long __opaque)
{
- unsigned long i;
+ struct niu *np = (struct niu *) __opaque;
+ unsigned long off;
+ int err, link_up;
- for (i = 0; i < np->parent->tcam_num_entries; i++) {
- int err = tcam_flush(np, i);
- if (err)
- return err;
- }
- return 0;
-}
+ err = niu_link_status(np, &link_up);
+ if (!err)
+ niu_link_status_common(np, link_up);
-static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
-{
- return ((u64)index | (num_entries == 1 ?
- HASH_TBL_ADDR_AUTOINC : 0));
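+	/* Re-arm: poll every 5 seconds while the carrier is up, every
+	 * second while it is down.
+	 */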
+ if (netif_carrier_ok(np->dev))
+ off = 5 * HZ;
+ else
+ off = 1 * HZ;
+ np->timer.expires = jiffies + off;
+
+ add_timer(&np->timer);
}
-#if 0
-static int hash_read(struct niu *np, unsigned long partition,
- unsigned long index, unsigned long num_entries,
- u64 *data)
-{
- u64 val = hash_addr_regval(index, num_entries);
- unsigned long i;
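+/* Each niu_phy_ops table pairs the serdes/transceiver init hooks with
+ * a link_status poller for one board flavor; the niu_phy_template
+ * structs further below add the matching MDIO base address.
+ */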
+static const struct niu_phy_ops phy_ops_10g_serdes = {
+ .serdes_init = serdes_init_10g_serdes,
+ .link_status = link_status_10g_serdes,
+};
- if (partition >= FCRAM_NUM_PARTITIONS ||
- index + num_entries > FCRAM_SIZE)
- return -EINVAL;
+static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
+ .serdes_init = serdes_init_niu_10g_serdes,
+ .link_status = link_status_10g_serdes,
+};
- nw64(HASH_TBL_ADDR(partition), val);
- for (i = 0; i < num_entries; i++)
- data[i] = nr64(HASH_TBL_DATA(partition));
+static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
+ .serdes_init = serdes_init_niu_1g_serdes,
+ .link_status = link_status_1g_serdes,
+};
- return 0;
-}
-#endif
+static const struct niu_phy_ops phy_ops_1g_rgmii = {
+ .xcvr_init = xcvr_init_1g_rgmii,
+ .link_status = link_status_1g_rgmii,
+};
-static int hash_write(struct niu *np, unsigned long partition,
- unsigned long index, unsigned long num_entries,
- u64 *data)
-{
- u64 val = hash_addr_regval(index, num_entries);
- unsigned long i;
+static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
+ .serdes_init = serdes_init_niu_10g_fiber,
+ .xcvr_init = xcvr_init_10g,
+ .link_status = link_status_10g,
+};
- if (partition >= FCRAM_NUM_PARTITIONS ||
- index + (num_entries * 8) > FCRAM_SIZE)
- return -EINVAL;
+static const struct niu_phy_ops phy_ops_10g_fiber = {
+ .serdes_init = serdes_init_10g,
+ .xcvr_init = xcvr_init_10g,
+ .link_status = link_status_10g,
+};
- nw64(HASH_TBL_ADDR(partition), val);
- for (i = 0; i < num_entries; i++)
- nw64(HASH_TBL_DATA(partition), data[i]);
+static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
+ .serdes_init = serdes_init_10g,
+ .xcvr_init = xcvr_init_10g_bcm8706,
+ .link_status = link_status_10g_hotplug,
+};
- return 0;
-}
+static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
+ .serdes_init = serdes_init_niu_10g_fiber,
+ .xcvr_init = xcvr_init_10g_bcm8706,
+ .link_status = link_status_10g_hotplug,
+};
-static void fflp_reset(struct niu *np)
-{
- u64 val;
+static const struct niu_phy_ops phy_ops_10g_copper = {
+ .serdes_init = serdes_init_10g,
+ .link_status = link_status_10g, /* XXX */
+};
- nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
- udelay(10);
- nw64(FFLP_CFG_1, 0);
+static const struct niu_phy_ops phy_ops_1g_fiber = {
+ .serdes_init = serdes_init_1g,
+ .xcvr_init = xcvr_init_1g,
+ .link_status = link_status_1g,
+};
- val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
- nw64(FFLP_CFG_1, val);
-}
+static const struct niu_phy_ops phy_ops_1g_copper = {
+ .xcvr_init = xcvr_init_1g,
+ .link_status = link_status_1g,
+};
-static void fflp_set_timings(struct niu *np)
-{
- u64 val = nr64(FFLP_CFG_1);
+struct niu_phy_template {
+ const struct niu_phy_ops *ops;
+ u32 phy_addr_base;
+};
- val &= ~FFLP_CFG_1_FFLPINITDONE;
- val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
- nw64(FFLP_CFG_1, val);
+static const struct niu_phy_template phy_template_niu_10g_fiber = {
+ .ops = &phy_ops_10g_fiber_niu,
+ .phy_addr_base = 16,
+};
- val = nr64(FFLP_CFG_1);
- val |= FFLP_CFG_1_FFLPINITDONE;
- nw64(FFLP_CFG_1, val);
+static const struct niu_phy_template phy_template_niu_10g_serdes = {
+ .ops = &phy_ops_10g_serdes_niu,
+ .phy_addr_base = 0,
+};
- val = nr64(FCRAM_REF_TMR);
- val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
- val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
- val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
- nw64(FCRAM_REF_TMR, val);
-}
+static const struct niu_phy_template phy_template_niu_1g_serdes = {
+ .ops = &phy_ops_1g_serdes_niu,
+ .phy_addr_base = 0,
+};
-static int fflp_set_partition(struct niu *np, u64 partition,
- u64 mask, u64 base, int enable)
-{
- unsigned long reg;
- u64 val;
+static const struct niu_phy_template phy_template_10g_fiber = {
+ .ops = &phy_ops_10g_fiber,
+ .phy_addr_base = 8,
+};
- if (partition >= FCRAM_NUM_PARTITIONS ||
- (mask & ~(u64)0x1f) != 0 ||
- (base & ~(u64)0x1f) != 0)
- return -EINVAL;
+static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
+ .ops = &phy_ops_10g_fiber_hotplug,
+ .phy_addr_base = 8,
+};
- reg = FLW_PRT_SEL(partition);
+static const struct niu_phy_template phy_template_niu_10g_hotplug = {
+ .ops = &phy_ops_niu_10g_hotplug,
+ .phy_addr_base = 8,
+};
- val = nr64(reg);
- val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
- val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
- val |= (base << FLW_PRT_SEL_BASE_SHIFT);
- if (enable)
- val |= FLW_PRT_SEL_EXT;
- nw64(reg, val);
+static const struct niu_phy_template phy_template_10g_copper = {
+ .ops = &phy_ops_10g_copper,
+ .phy_addr_base = 10,
+};
- return 0;
-}
+static const struct niu_phy_template phy_template_1g_fiber = {
+ .ops = &phy_ops_1g_fiber,
+ .phy_addr_base = 0,
+};
-static int fflp_disable_all_partitions(struct niu *np)
-{
- unsigned long i;
+static const struct niu_phy_template phy_template_1g_copper = {
+ .ops = &phy_ops_1g_copper,
+ .phy_addr_base = 0,
+};
- for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
- int err = fflp_set_partition(np, 0, 0, 0, 0);
- if (err)
- return err;
- }
- return 0;
-}
+static const struct niu_phy_template phy_template_1g_rgmii = {
+ .ops = &phy_ops_1g_rgmii,
+ .phy_addr_base = 0,
+};
-static void fflp_llcsnap_enable(struct niu *np, int on)
-{
- u64 val = nr64(FFLP_CFG_1);
+static const struct niu_phy_template phy_template_10g_serdes = {
+ .ops = &phy_ops_10g_serdes,
+ .phy_addr_base = 0,
+};
- if (on)
- val |= FFLP_CFG_1_LLCSNAP;
- else
- val &= ~FFLP_CFG_1_LLCSNAP;
- nw64(FFLP_CFG_1, val);
-}
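+/* Per-port MDIO address offsets on ATCA boards: ports 0/1 run the
+ * 10G serdes template (base address 0), ports 2/3 carry 1G RGMII
+ * PHYs at addresses 11 and 10.
+ */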
+static int niu_atca_port_num[4] = {
+ 0, 0, 11, 10
+};
-static void fflp_errors_enable(struct niu *np, int on)
+static int serdes_init_10g_serdes(struct niu *np)
{
- u64 val = nr64(FFLP_CFG_1);
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
+ u64 ctrl_val, test_cfg_val, sig, mask, val;
- if (on)
- val &= ~FFLP_CFG_1_ERRORDIS;
- else
- val |= FFLP_CFG_1_ERRORDIS;
- nw64(FFLP_CFG_1, val);
-}
+ switch (np->port) {
+ case 0:
+ ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+ pll_cfg = ENET_SERDES_0_PLL_CFG;
+ break;
+ case 1:
+ ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+ pll_cfg = ENET_SERDES_1_PLL_CFG;
+ break;
-static int fflp_hash_clear(struct niu *np)
-{
- struct fcram_hash_ipv4 ent;
- unsigned long i;
+ default:
+ return -EINVAL;
+ }
+ ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+ ENET_SERDES_CTRL_SDET_1 |
+ ENET_SERDES_CTRL_SDET_2 |
+ ENET_SERDES_CTRL_SDET_3 |
+ (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+ test_cfg_val = 0;
- /* IPV4 hash entry with valid bit clear, rest is don't care. */
- memset(&ent, 0, sizeof(ent));
- ent.header = HASH_HEADER_EXT;
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_0_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_1_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_2_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_3_SHIFT));
+ }
- for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
- int err = hash_write(np, 0, i, 1, (u64 *) &ent);
+ esr_reset(np);
+ nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
+ nw64(ctrl_reg, ctrl_val);
+ nw64(test_cfg_reg, test_cfg_val);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ u32 rxtx_ctrl, glue0;
+ int err;
+
+ err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_read_glue0(np, i, &glue0);
+ if (err)
+ return err;
+
+ rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+ rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+ (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+ glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+ ESR_GLUE_CTRL0_THCNT |
+ ESR_GLUE_CTRL0_BLTIME);
+ glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+ (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+ (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+ (BLTIME_300_CYCLES <<
+ ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+ err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_write_glue0(np, i, glue0);
if (err)
return err;
}
+
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
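+	/* If the 10G signal bits never come ready, drop back to 1G
+	 * serdes mode before failing the port.
+	 */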
+ if ((sig & mask) != val) {
+ int err;
+ err = serdes_init_1g_serdes(np);
+ if (!err) {
+ np->flags &= ~NIU_FLAGS_10G;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ } else {
+			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
+ return -ENODEV;
+ }
+ }
+
return 0;
}
-static int fflp_early_init(struct niu *np)
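+/* Select a PHY template (ops + MDIO base) from the platform type and
+ * the 10G/FIBER/XCVR_SERDES flag combination, then derive the final
+ * phy_addr from the template base plus a per-port offset.
+ */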
+static int niu_determine_phy_disposition(struct niu *np)
{
- struct niu_parent *parent;
- unsigned long flags;
- int err;
-
- niu_lock_parent(np, flags);
+ struct niu_parent *parent = np->parent;
+ u8 plat_type = parent->plat_type;
+ const struct niu_phy_template *tp;
+ u32 phy_addr_off = 0;
- parent = np->parent;
- err = 0;
- if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
- niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
- np->port);
- if (np->parent->plat_type != PLAT_TYPE_NIU) {
- fflp_reset(np);
- fflp_set_timings(np);
- err = fflp_disable_all_partitions(np);
- if (err) {
- niudbg(PROBE, "fflp_disable_all_partitions "
- "failed, err=%d\n", err);
- goto out;
+ if (plat_type == PLAT_TYPE_NIU) {
+ switch (np->flags &
+ (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ /* 10G Serdes */
+ tp = &phy_template_niu_10g_serdes;
+ break;
+ case NIU_FLAGS_XCVR_SERDES:
+ /* 1G Serdes */
+ tp = &phy_template_niu_1g_serdes;
+ break;
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ /* 10G Fiber */
+ default:
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ tp = &phy_template_niu_10g_hotplug;
+ if (np->port == 0)
+ phy_addr_off = 8;
+ if (np->port == 1)
+ phy_addr_off = 12;
+ } else {
+ tp = &phy_template_niu_10g_fiber;
+ phy_addr_off += np->port;
}
+ break;
}
+ } else {
+ switch (np->flags &
+ (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case 0:
+ /* 1G copper */
+ tp = &phy_template_1g_copper;
+ if (plat_type == PLAT_TYPE_VF_P0)
+ phy_addr_off = 10;
+ else if (plat_type == PLAT_TYPE_VF_P1)
+ phy_addr_off = 26;
- err = tcam_early_init(np);
- if (err) {
- niudbg(PROBE, "tcam_early_init failed, err=%d\n",
- err);
- goto out;
- }
- fflp_llcsnap_enable(np, 1);
- fflp_errors_enable(np, 0);
- nw64(H1POLY, 0);
- nw64(H2POLY, 0);
+ phy_addr_off += (np->port ^ 0x3);
+ break;
- err = tcam_flush_all(np);
- if (err) {
- niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
- err);
- goto out;
- }
- if (np->parent->plat_type != PLAT_TYPE_NIU) {
- err = fflp_hash_clear(np);
- if (err) {
- niudbg(PROBE, "fflp_hash_clear failed, "
- "err=%d\n", err);
- goto out;
+ case NIU_FLAGS_10G:
+ /* 10G copper */
+ tp = &phy_template_10g_copper;
+ break;
+
+ case NIU_FLAGS_FIBER:
+ /* 1G fiber */
+ tp = &phy_template_1g_fiber;
+ break;
+
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ /* 10G fiber */
+ tp = &phy_template_10g_fiber;
+ if (plat_type == PLAT_TYPE_VF_P0 ||
+ plat_type == PLAT_TYPE_VF_P1)
+ phy_addr_off = 8;
+ phy_addr_off += np->port;
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ tp = &phy_template_10g_fiber_hotplug;
+ if (np->port == 0)
+ phy_addr_off = 8;
+ if (np->port == 1)
+ phy_addr_off = 12;
}
- }
+ break;
- vlan_tbl_clear(np);
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
+ case NIU_FLAGS_XCVR_SERDES:
+			switch (np->port) {
+ case 0:
+ case 1:
+ tp = &phy_template_10g_serdes;
+ break;
+ case 2:
+ case 3:
+ tp = &phy_template_1g_rgmii;
+ break;
+ default:
+ return -EINVAL;
+ }
+ phy_addr_off = niu_atca_port_num[np->port];
+ break;
- niudbg(PROBE, "fflp_early_init: Success\n");
- parent->flags |= PARENT_FLGS_CLS_HWINIT;
+ default:
+ return -EINVAL;
+ }
}
-out:
- niu_unlock_parent(np, flags);
- return err;
-}
-static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
-{
- if (class_code < CLASS_CODE_USER_PROG1 ||
- class_code > CLASS_CODE_SCTP_IPV6)
- return -EINVAL;
+ np->phy_ops = tp->ops;
+ np->phy_addr = tp->phy_addr_base + phy_addr_off;
- nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
return 0;
}
-static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
+static int niu_init_link(struct niu *np)
{
- if (class_code < CLASS_CODE_USER_PROG1 ||
- class_code > CLASS_CODE_SCTP_IPV6)
- return -EINVAL;
+ struct niu_parent *parent = np->parent;
+ int err, ignore;
- nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+ if (parent->plat_type == PLAT_TYPE_NIU) {
+ err = niu_xcvr_init(np);
+ if (err)
+ return err;
+ msleep(200);
+ }
+ err = niu_serdes_init(np);
+ if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
+ return err;
+ msleep(200);
+ err = niu_xcvr_init(np);
+ if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
+ niu_link_status(np, &ignore);
return 0;
}
-static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
- u32 offset, u32 size)
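+/* The station MAC address is programmed as three 16-bit halves, with
+ * the two lowest-order bytes in ADDR0.
+ */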
+static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
{
- int i = skb_shinfo(skb)->nr_frags;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- frag->page = page;
- frag->page_offset = offset;
- frag->size = size;
-
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
+ u16 reg0 = addr[4] << 8 | addr[5];
+ u16 reg1 = addr[2] << 8 | addr[3];
+ u16 reg2 = addr[0] << 8 | addr[1];
- skb_shinfo(skb)->nr_frags = i + 1;
+ if (np->flags & NIU_FLAGS_XMAC) {
+ nw64_mac(XMAC_ADDR0, reg0);
+ nw64_mac(XMAC_ADDR1, reg1);
+ nw64_mac(XMAC_ADDR2, reg2);
+ } else {
+ nw64_mac(BMAC_ADDR0, reg0);
+ nw64_mac(BMAC_ADDR1, reg1);
+ nw64_mac(BMAC_ADDR2, reg2);
+ }
}
-static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
+static int niu_num_alt_addr(struct niu *np)
{
- a >>= PAGE_SHIFT;
- a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
-
- return (a & (MAX_RBR_RING_SIZE - 1));
+ if (np->flags & NIU_FLAGS_XMAC)
+ return XMAC_NUM_ALT_ADDR;
+ else
+ return BMAC_NUM_ALT_ADDR;
}
-static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
- struct page ***link)
+static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
- unsigned int h = niu_hash_rxaddr(rp, addr);
- struct page *p, **pp;
+ u16 reg0 = addr[4] << 8 | addr[5];
+ u16 reg1 = addr[2] << 8 | addr[3];
+ u16 reg2 = addr[0] << 8 | addr[1];
- addr &= PAGE_MASK;
- pp = &rp->rxhash[h];
- for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
- if (p->index == addr) {
- *link = pp;
- break;
- }
+ if (index >= niu_num_alt_addr(np))
+ return -EINVAL;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ nw64_mac(XMAC_ALT_ADDR0(index), reg0);
+ nw64_mac(XMAC_ALT_ADDR1(index), reg1);
+ nw64_mac(XMAC_ALT_ADDR2(index), reg2);
+ } else {
+ nw64_mac(BMAC_ALT_ADDR0(index), reg0);
+ nw64_mac(BMAC_ALT_ADDR1(index), reg1);
+ nw64_mac(BMAC_ALT_ADDR2(index), reg2);
}
- return p;
+ return 0;
}
-static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
+static int niu_enable_alt_mac(struct niu *np, int index, int on)
{
- unsigned int h = niu_hash_rxaddr(rp, base);
+ unsigned long reg;
+ u64 val, mask;
- page->index = base;
- page->mapping = (struct address_space *) rp->rxhash[h];
- rp->rxhash[h] = page;
+ if (index >= niu_num_alt_addr(np))
+ return -EINVAL;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ reg = XMAC_ADDR_CMPEN;
+ mask = 1 << index;
+ } else {
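+		/* BMAC alternate entries start at compare-enable bit 1
+		 * (bit 0 is presumably the primary address).
+		 */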
+ reg = BMAC_ADDR_CMPEN;
+ mask = 1 << (index + 1);
+ }
+
+ val = nr64_mac(reg);
+ if (on)
+ val |= mask;
+ else
+ val &= ~mask;
+ nw64_mac(reg, val);
+
+ return 0;
}
-static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
- gfp_t mask, int start_index)
+static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
+ int num, int mac_pref)
{
- struct page *page;
- u64 addr;
- int i;
-
- page = alloc_page(mask);
- if (!page)
- return -ENOMEM;
+ u64 val = nr64_mac(reg);
+ val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
+ val |= num;
+ if (mac_pref)
+ val |= HOST_INFO_MPR;
+ nw64_mac(reg, val);
+}
- addr = np->ops->map_page(np->device, page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+static int __set_rdc_table_num(struct niu *np,
+ int xmac_index, int bmac_index,
+ int rdc_table_num, int mac_pref)
+{
+ unsigned long reg;
- niu_hash_page(rp, page, addr);
- if (rp->rbr_blocks_per_page > 1)
- atomic_add(rp->rbr_blocks_per_page - 1,
- &compound_head(page)->_count);
+ if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
+ return -EINVAL;
+ if (np->flags & NIU_FLAGS_XMAC)
+ reg = XMAC_HOST_INFO(xmac_index);
+ else
+ reg = BMAC_HOST_INFO(bmac_index);
+ __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
+ return 0;
+}
- for (i = 0; i < rp->rbr_blocks_per_page; i++) {
- __le32 *rbr = &rp->rbr[start_index + i];
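+/* Host-info slot layout differs per MAC: the XMAC keeps the primary
+ * MAC in slot 17 and multicast in slot 16, while the BMAC uses slot 0
+ * for primary, slot 8 for multicast, and idx + 1 for alternates.
+ */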
+static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
+ int mac_pref)
+{
+ return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
+}
- *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
- addr += rp->rbr_block_size;
- }
+static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
+ int mac_pref)
+{
+ return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
+}
- return 0;
+static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
+ int table_num, int mac_pref)
+{
+ if (idx >= niu_num_alt_addr(np))
+ return -EINVAL;
+ return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
}
-static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
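+/* Each VLAN table entry carries two parity bits: PARITY0 covers the
+ * low byte (ports 0 and 1), PARITY1 the high byte (ports 2 and 3).
+ * The bit is set when its byte has odd weight, keeping overall parity
+ * even.
+ */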
+static u64 vlan_entry_set_parity(u64 reg_val)
{
- int index = rp->rbr_index;
+ u64 port01_mask;
+ u64 port23_mask;
- rp->rbr_pending++;
- if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
- int err = niu_rbr_add_page(np, rp, mask, index);
+ port01_mask = 0x00ff;
+ port23_mask = 0xff00;
- if (unlikely(err)) {
- rp->rbr_pending--;
- return;
- }
+ if (hweight64(reg_val & port01_mask) & 1)
+ reg_val |= ENET_VLAN_TBL_PARITY0;
+ else
+ reg_val &= ~ENET_VLAN_TBL_PARITY0;
- rp->rbr_index += rp->rbr_blocks_per_page;
- BUG_ON(rp->rbr_index > rp->rbr_table_size);
- if (rp->rbr_index == rp->rbr_table_size)
- rp->rbr_index = 0;
+ if (hweight64(reg_val & port23_mask) & 1)
+ reg_val |= ENET_VLAN_TBL_PARITY1;
+ else
+ reg_val &= ~ENET_VLAN_TBL_PARITY1;
- if (rp->rbr_pending >= rp->rbr_kick_thresh) {
- nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
- rp->rbr_pending = 0;
- }
- }
+ return reg_val;
}
-static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
+static void vlan_tbl_write(struct niu *np, unsigned long index,
+ int port, int vpr, int rdc_table)
{
- unsigned int index = rp->rcr_index;
- int num_rcr = 0;
+ u64 reg_val = nr64(ENET_VLAN_TBL(index));
- rp->rx_dropped++;
- while (1) {
- struct page *page, **link;
- u64 addr, val;
- u32 rcr_size;
+ reg_val &= ~((ENET_VLAN_TBL_VPR |
+ ENET_VLAN_TBL_VLANRDCTBLN) <<
+ ENET_VLAN_TBL_SHIFT(port));
+ if (vpr)
+ reg_val |= (ENET_VLAN_TBL_VPR <<
+ ENET_VLAN_TBL_SHIFT(port));
+ reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
- num_rcr++;
+ reg_val = vlan_entry_set_parity(reg_val);
- val = le64_to_cpup(&rp->rcr[index]);
- addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
- RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
- page = niu_find_rxpage(rp, addr, &link);
+ nw64(ENET_VLAN_TBL(index), reg_val);
+}
- rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
- RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
- *link = (struct page *) page->mapping;
- np->ops->unmap_page(np->device, page->index,
- PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
- page->mapping = NULL;
- __free_page(page);
- rp->rbr_refill_pending++;
- }
+static void vlan_tbl_clear(struct niu *np)
+{
+ int i;
- index = NEXT_RCR(rp, index);
- if (!(val & RCR_ENTRY_MULTI))
- break;
+ for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
+ nw64(ENET_VLAN_TBL(i), 0);
+}
+
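+/* Spin for at most ~1ms waiting for the given TCAM_CTL status bit. */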
+static int tcam_wait_bit(struct niu *np, u64 bit)
+{
+ int limit = 1000;
+ while (--limit > 0) {
+ if (nr64(TCAM_CTL) & bit)
+ break;
+ udelay(1);
}
- rp->rcr_index = index;
+	if (limit <= 0)
+ return -ENODEV;
- return num_rcr;
+ return 0;
}
-static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
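+/* Clear (flush) one TCAM entry: commit an all-zero key with mask 0xff. */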
+static int tcam_flush(struct niu *np, int index)
{
- unsigned int index = rp->rcr_index;
- struct sk_buff *skb;
- int len, num_rcr;
+ nw64(TCAM_KEY_0, 0x00);
+ nw64(TCAM_KEY_MASK_0, 0xff);
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
- skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
- if (unlikely(!skb))
- return niu_rx_pkt_ignore(np, rp);
+ return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
- num_rcr = 0;
- while (1) {
- struct page *page, **link;
- u32 rcr_size, append_size;
- u64 addr, val, off;
+#if 0
+static int tcam_read(struct niu *np, int index,
+ u64 *key, u64 *mask)
+{
+ int err;
- num_rcr++;
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
+ err = tcam_wait_bit(np, TCAM_CTL_STAT);
+ if (!err) {
+ key[0] = nr64(TCAM_KEY_0);
+ key[1] = nr64(TCAM_KEY_1);
+ key[2] = nr64(TCAM_KEY_2);
+ key[3] = nr64(TCAM_KEY_3);
+ mask[0] = nr64(TCAM_KEY_MASK_0);
+ mask[1] = nr64(TCAM_KEY_MASK_1);
+ mask[2] = nr64(TCAM_KEY_MASK_2);
+ mask[3] = nr64(TCAM_KEY_MASK_3);
+ }
+ return err;
+}
+#endif
- val = le64_to_cpup(&rp->rcr[index]);
+static int tcam_write(struct niu *np, int index,
+ u64 *key, u64 *mask)
+{
+ nw64(TCAM_KEY_0, key[0]);
+ nw64(TCAM_KEY_1, key[1]);
+ nw64(TCAM_KEY_2, key[2]);
+ nw64(TCAM_KEY_3, key[3]);
+ nw64(TCAM_KEY_MASK_0, mask[0]);
+ nw64(TCAM_KEY_MASK_1, mask[1]);
+ nw64(TCAM_KEY_MASK_2, mask[2]);
+ nw64(TCAM_KEY_MASK_3, mask[3]);
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
- len = (val & RCR_ENTRY_L2_LEN) >>
- RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
-
- addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
- RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
- page = niu_find_rxpage(rp, addr, &link);
-
- rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
- RCR_ENTRY_PKTBUFSZ_SHIFT];
-
- off = addr & ~PAGE_MASK;
- append_size = rcr_size;
- if (num_rcr == 1) {
- int ptype;
+ return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
- off += 2;
- append_size -= 2;
+#if 0
+static int tcam_assoc_read(struct niu *np, int index, u64 *data)
+{
+ int err;
- ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
- if ((ptype == RCR_PKT_TYPE_TCP ||
- ptype == RCR_PKT_TYPE_UDP) &&
- !(val & (RCR_ENTRY_NOPORT |
- RCR_ENTRY_ERROR)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
- }
- if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
+ err = tcam_wait_bit(np, TCAM_CTL_STAT);
+ if (!err)
+ *data = nr64(TCAM_KEY_1);
- niu_rx_skb_append(skb, page, off, append_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
- *link = (struct page *) page->mapping;
- np->ops->unmap_page(np->device, page->index,
- PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
- page->mapping = NULL;
- rp->rbr_refill_pending++;
- } else
- get_page(page);
+ return err;
+}
+#endif
- index = NEXT_RCR(rp, index);
- if (!(val & RCR_ENTRY_MULTI))
- break;
+static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
+{
+ nw64(TCAM_KEY_1, assoc_data);
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
- }
- rp->rcr_index = index;
+ return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
- skb_reserve(skb, NET_IP_ALIGN);
- __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+static void tcam_enable(struct niu *np, int on)
+{
+ u64 val = nr64(FFLP_CFG_1);
- rp->rx_packets++;
- rp->rx_bytes += skb->len;
+ if (on)
+ val &= ~FFLP_CFG_1_TCAM_DIS;
+ else
+ val |= FFLP_CFG_1_TCAM_DIS;
+ nw64(FFLP_CFG_1, val);
+}
- skb->protocol = eth_type_trans(skb, np->dev);
- netif_receive_skb(skb);
+static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
+{
+ u64 val = nr64(FFLP_CFG_1);
- np->dev->last_rx = jiffies;
+ val &= ~(FFLP_CFG_1_FFLPINITDONE |
+ FFLP_CFG_1_CAMLAT |
+ FFLP_CFG_1_CAMRATIO);
+ val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
+ val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
+ nw64(FFLP_CFG_1, val);
- return num_rcr;
+ val = nr64(FFLP_CFG_1);
+ val |= FFLP_CFG_1_FFLPINITDONE;
+ nw64(FFLP_CFG_1, val);
}
-static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
+static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
+ int on)
{
- int blocks_per_page = rp->rbr_blocks_per_page;
- int err, index = rp->rbr_index;
+ unsigned long reg;
+ u64 val;
- err = 0;
- while (index < (rp->rbr_table_size - blocks_per_page)) {
- err = niu_rbr_add_page(np, rp, mask, index);
- if (err)
- break;
+ if (class < CLASS_CODE_ETHERTYPE1 ||
+ class > CLASS_CODE_ETHERTYPE2)
+ return -EINVAL;
- index += blocks_per_page;
- }
+ reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
+ val = nr64(reg);
+ if (on)
+ val |= L2_CLS_VLD;
+ else
+ val &= ~L2_CLS_VLD;
+ nw64(reg, val);
- rp->rbr_index = index;
- return err;
+ return 0;
}
-static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
+#if 0
+static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
+ u64 ether_type)
{
- int i;
+ unsigned long reg;
+ u64 val;
- for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
- struct page *page;
+ if (class < CLASS_CODE_ETHERTYPE1 ||
+ class > CLASS_CODE_ETHERTYPE2 ||
+ (ether_type & ~(u64)0xffff) != 0)
+ return -EINVAL;
- page = rp->rxhash[i];
- while (page) {
- struct page *next = (struct page *) page->mapping;
- u64 base = page->index;
+ reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
+ val = nr64(reg);
+ val &= ~L2_CLS_ETYPE;
+ val |= (ether_type << L2_CLS_ETYPE_SHIFT);
+ nw64(reg, val);
- np->ops->unmap_page(np->device, base, PAGE_SIZE,
- DMA_FROM_DEVICE);
- page->index = 0;
- page->mapping = NULL;
+ return 0;
+}
+#endif
- __free_page(page);
+static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
+ int on)
+{
+ unsigned long reg;
+ u64 val;
- page = next;
- }
- }
+ if (class < CLASS_CODE_USER_PROG1 ||
+ class > CLASS_CODE_USER_PROG4)
+ return -EINVAL;
- for (i = 0; i < rp->rbr_table_size; i++)
- rp->rbr[i] = cpu_to_le32(0);
- rp->rbr_index = 0;
+ reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
+ val = nr64(reg);
+ if (on)
+ val |= L3_CLS_VALID;
+ else
+ val &= ~L3_CLS_VALID;
+ nw64(reg, val);
+
+ return 0;
}
-static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
+static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
+ int ipv6, u64 protocol_id,
+ u64 tos_mask, u64 tos_val)
{
- struct tx_buff_info *tb = &rp->tx_buffs[idx];
- struct sk_buff *skb = tb->skb;
- struct tx_pkt_hdr *tp;
- u64 tx_flags;
- int i, len;
-
- tp = (struct tx_pkt_hdr *) skb->data;
- tx_flags = le64_to_cpup(&tp->flags);
+ unsigned long reg;
+ u64 val;
- rp->tx_packets++;
- rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
- ((tx_flags & TXHDR_PAD) / 2));
+ if (class < CLASS_CODE_USER_PROG1 ||
+ class > CLASS_CODE_USER_PROG4 ||
+ (protocol_id & ~(u64)0xff) != 0 ||
+ (tos_mask & ~(u64)0xff) != 0 ||
+ (tos_val & ~(u64)0xff) != 0)
+ return -EINVAL;
- len = skb_headlen(skb);
- np->ops->unmap_single(np->device, tb->mapping,
- len, DMA_TO_DEVICE);
+ reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
+ val = nr64(reg);
+ val &= ~(L3_CLS_IPVER | L3_CLS_PID |
+ L3_CLS_TOSMASK | L3_CLS_TOS);
+ if (ipv6)
+ val |= L3_CLS_IPVER;
+ val |= (protocol_id << L3_CLS_PID_SHIFT);
+ val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
+ val |= (tos_val << L3_CLS_TOS_SHIFT);
+ nw64(reg, val);
- if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
- rp->mark_pending--;
+ return 0;
+}
- tb->skb = NULL;
- do {
- idx = NEXT_TX(rp, idx);
- len -= MAX_TX_DESC_LEN;
- } while (len > 0);
+static int tcam_early_init(struct niu *np)
+{
+ unsigned long i;
+ int err;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- tb = &rp->tx_buffs[idx];
- BUG_ON(tb->skb != NULL);
- np->ops->unmap_page(np->device, tb->mapping,
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- idx = NEXT_TX(rp, idx);
+ tcam_enable(np, 0);
+ tcam_set_lat_and_ratio(np,
+ DEFAULT_TCAM_LATENCY,
+ DEFAULT_TCAM_ACCESS_RATIO);
+ for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
+ err = tcam_user_eth_class_enable(np, i, 0);
+ if (err)
+ return err;
+ }
+ for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
+ err = tcam_user_ip_class_enable(np, i, 0);
+ if (err)
+ return err;
}
- dev_kfree_skb(skb);
-
- return idx;
+ return 0;
}
-#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
-
-static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
+static int tcam_flush_all(struct niu *np)
{
- u16 pkt_cnt, tmp;
- int cons;
- u64 cs;
-
- cs = rp->tx_cs;
- if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
- goto out;
-
- tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
- pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
- (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
+ unsigned long i;
- rp->last_pkt_cnt = tmp;
+ for (i = 0; i < np->parent->tcam_num_entries; i++) {
+ int err = tcam_flush(np, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
- cons = rp->cons;
+static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
+{
+ return ((u64)index | (num_entries == 1 ?
+ HASH_TBL_ADDR_AUTOINC : 0));
+}
- niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
- np->dev->name, pkt_cnt, cons);
+#if 0
+static int hash_read(struct niu *np, unsigned long partition,
+ unsigned long index, unsigned long num_entries,
+ u64 *data)
+{
+ u64 val = hash_addr_regval(index, num_entries);
+ unsigned long i;
- while (pkt_cnt--)
- cons = release_tx_packet(np, rp, cons);
+ if (partition >= FCRAM_NUM_PARTITIONS ||
+ index + num_entries > FCRAM_SIZE)
+ return -EINVAL;
- rp->cons = cons;
- smp_mb();
+ nw64(HASH_TBL_ADDR(partition), val);
+ for (i = 0; i < num_entries; i++)
+ data[i] = nr64(HASH_TBL_DATA(partition));
-out:
- if (unlikely(netif_queue_stopped(np->dev) &&
- (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
- netif_tx_lock(np->dev);
- if (netif_queue_stopped(np->dev) &&
- (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
- netif_wake_queue(np->dev);
- netif_tx_unlock(np->dev);
- }
+ return 0;
}
-
-static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
-{
- int qlen, rcr_done = 0, work_done = 0;
- struct rxdma_mailbox *mbox = rp->mbox;
- u64 stat;
-
-#if 1
- stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
- qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
-#else
- stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
- qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
- mbox->rx_dma_ctl_stat = 0;
- mbox->rcrstat_a = 0;
- niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
- np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
+static int hash_write(struct niu *np, unsigned long partition,
+ unsigned long index, unsigned long num_entries,
+ u64 *data)
+{
+ u64 val = hash_addr_regval(index, num_entries);
+ unsigned long i;
- rcr_done = work_done = 0;
- qlen = min(qlen, budget);
- while (work_done < qlen) {
- rcr_done += niu_process_rx_pkt(np, rp);
- work_done++;
- }
+ if (partition >= FCRAM_NUM_PARTITIONS ||
+ index + (num_entries * 8) > FCRAM_SIZE)
+ return -EINVAL;
- if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
- unsigned int i;
+ nw64(HASH_TBL_ADDR(partition), val);
+ for (i = 0; i < num_entries; i++)
+ nw64(HASH_TBL_DATA(partition), data[i]);
- for (i = 0; i < rp->rbr_refill_pending; i++)
- niu_rbr_refill(np, rp, GFP_ATOMIC);
- rp->rbr_refill_pending = 0;
- }
+ return 0;
+}
- stat = (RX_DMA_CTL_STAT_MEX |
- ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
- ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
+static void fflp_reset(struct niu *np)
+{
+ u64 val;
- nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
+ nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
+ udelay(10);
+ nw64(FFLP_CFG_1, 0);
- return work_done;
+ val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
+ nw64(FFLP_CFG_1, val);
}
-static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
+static void fflp_set_timings(struct niu *np)
{
- u64 v0 = lp->v0;
- u32 tx_vec = (v0 >> 32);
- u32 rx_vec = (v0 & 0xffffffff);
- int i, work_done = 0;
+ u64 val = nr64(FFLP_CFG_1);
- niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
- np->dev->name, (unsigned long long) v0);
+ val &= ~FFLP_CFG_1_FFLPINITDONE;
+ val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
+ nw64(FFLP_CFG_1, val);
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
- if (tx_vec & (1 << rp->tx_channel))
- niu_tx_work(np, rp);
- nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
- }
+ val = nr64(FFLP_CFG_1);
+ val |= FFLP_CFG_1_FFLPINITDONE;
+ nw64(FFLP_CFG_1, val);
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ val = nr64(FCRAM_REF_TMR);
+ val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
+ val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
+ val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
+ nw64(FCRAM_REF_TMR, val);
+}
- if (rx_vec & (1 << rp->rx_channel)) {
- int this_work_done;
+static int fflp_set_partition(struct niu *np, u64 partition,
+ u64 mask, u64 base, int enable)
+{
+ unsigned long reg;
+ u64 val;
- this_work_done = niu_rx_work(np, rp,
- budget);
+ if (partition >= FCRAM_NUM_PARTITIONS ||
+ (mask & ~(u64)0x1f) != 0 ||
+ (base & ~(u64)0x1f) != 0)
+ return -EINVAL;
- budget -= this_work_done;
- work_done += this_work_done;
- }
- nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
- }
+ reg = FLW_PRT_SEL(partition);
- return work_done;
+ val = nr64(reg);
+ val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
+ val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
+ val |= (base << FLW_PRT_SEL_BASE_SHIFT);
+ if (enable)
+ val |= FLW_PRT_SEL_EXT;
+ nw64(reg, val);
+
+ return 0;
}
-static int niu_poll(struct napi_struct *napi, int budget)
+static int fflp_disable_all_partitions(struct niu *np)
{
- struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
- struct niu *np = lp->np;
- int work_done;
-
- work_done = niu_poll_core(np, lp, budget);
+ unsigned long i;
- if (work_done < budget) {
- netif_rx_complete(np->dev, napi);
- niu_ldg_rearm(np, lp, 1);
+ for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
+ int err = fflp_set_partition(np, 0, 0, 0, 0);
+ if (err)
+ return err;
}
- return work_done;
+ return 0;
}
-static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
- u64 stat)
+static void fflp_llcsnap_enable(struct niu *np, int on)
{
- dev_err(np->device, PFX "%s: RX channel %u errors ( ",
- np->dev->name, rp->rx_channel);
-
- if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
- printk("RBR_TMOUT ");
- if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
- printk("RSP_CNT ");
- if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
- printk("BYTE_EN_BUS ");
- if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
- printk("RSP_DAT ");
- if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
- printk("RCR_ACK ");
- if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
- printk("RCR_SHA_PAR ");
- if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
- printk("RBR_PRE_PAR ");
- if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
- printk("CONFIG ");
- if (stat & RX_DMA_CTL_STAT_RCRINCON)
- printk("RCRINCON ");
- if (stat & RX_DMA_CTL_STAT_RCRFULL)
- printk("RCRFULL ");
- if (stat & RX_DMA_CTL_STAT_RBRFULL)
- printk("RBRFULL ");
- if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
- printk("RBRLOGPAGE ");
- if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
- printk("CFIGLOGPAGE ");
- if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
- printk("DC_FIDO ");
+ u64 val = nr64(FFLP_CFG_1);
- printk(")\n");
+ if (on)
+ val |= FFLP_CFG_1_LLCSNAP;
+ else
+ val &= ~FFLP_CFG_1_LLCSNAP;
+ nw64(FFLP_CFG_1, val);
}
-static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
+static void fflp_errors_enable(struct niu *np, int on)
{
- u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
- int err = 0;
+ u64 val = nr64(FFLP_CFG_1);
+ if (on)
+ val &= ~FFLP_CFG_1_ERRORDIS;
+ else
+ val |= FFLP_CFG_1_ERRORDIS;
+ nw64(FFLP_CFG_1, val);
+}
- if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
- RX_DMA_CTL_STAT_PORT_FATAL))
- err = -EINVAL;
+static int fflp_hash_clear(struct niu *np)
+{
+ struct fcram_hash_ipv4 ent;
+ unsigned long i;
- if (err) {
- dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
- np->dev->name, rp->rx_channel,
- (unsigned long long) stat);
+ /* IPV4 hash entry with valid bit clear, rest is don't care. */
+ memset(&ent, 0, sizeof(ent));
+ ent.header = HASH_HEADER_EXT;
- niu_log_rxchan_errors(np, rp, stat);
+ for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
+ int err = hash_write(np, 0, i, 1, (u64 *) &ent);
+ if (err)
+ return err;
}
-
- nw64(RX_DMA_CTL_STAT(rp->rx_channel),
- stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
-
- return err;
+ return 0;
}
-static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
- u64 cs)
+static int fflp_early_init(struct niu *np)
{
- dev_err(np->device, PFX "%s: TX channel %u errors ( ",
- np->dev->name, rp->tx_channel);
+ struct niu_parent *parent;
+ unsigned long flags;
+ int err;
- if (cs & TX_CS_MBOX_ERR)
- printk("MBOX ");
- if (cs & TX_CS_PKT_SIZE_ERR)
- printk("PKT_SIZE ");
- if (cs & TX_CS_TX_RING_OFLOW)
- printk("TX_RING_OFLOW ");
- if (cs & TX_CS_PREF_BUF_PAR_ERR)
- printk("PREF_BUF_PAR ");
- if (cs & TX_CS_NACK_PREF)
- printk("NACK_PREF ");
- if (cs & TX_CS_NACK_PKT_RD)
- printk("NACK_PKT_RD ");
- if (cs & TX_CS_CONF_PART_ERR)
- printk("CONF_PART ");
- if (cs & TX_CS_PKT_PRT_ERR)
- printk("PKT_PTR ");
+ niu_lock_parent(np, flags);
- printk(")\n");
-}
+ parent = np->parent;
+ err = 0;
+ if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
+ niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
+ np->port);
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ fflp_reset(np);
+ fflp_set_timings(np);
+ err = fflp_disable_all_partitions(np);
+ if (err) {
+ niudbg(PROBE, "fflp_disable_all_partitions "
+ "failed, err=%d\n", err);
+ goto out;
+ }
+ }
-static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
-{
- u64 cs, logh, logl;
+ err = tcam_early_init(np);
+ if (err) {
+ niudbg(PROBE, "tcam_early_init failed, err=%d\n",
+ err);
+ goto out;
+ }
+ fflp_llcsnap_enable(np, 1);
+ fflp_errors_enable(np, 0);
+ nw64(H1POLY, 0);
+ nw64(H2POLY, 0);
- cs = nr64(TX_CS(rp->tx_channel));
- logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
- logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
+ err = tcam_flush_all(np);
+ if (err) {
+ niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
+ err);
+ goto out;
+ }
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ err = fflp_hash_clear(np);
+ if (err) {
+ niudbg(PROBE, "fflp_hash_clear failed, "
+ "err=%d\n", err);
+ goto out;
+ }
+ }
- dev_err(np->device, PFX "%s: TX channel %u error, "
- "cs[%llx] logh[%llx] logl[%llx]\n",
- np->dev->name, rp->tx_channel,
- (unsigned long long) cs,
- (unsigned long long) logh,
- (unsigned long long) logl);
+ vlan_tbl_clear(np);
- niu_log_txchan_errors(np, rp, cs);
+ niudbg(PROBE, "fflp_early_init: Success\n");
+ parent->flags |= PARENT_FLGS_CLS_HWINIT;
+ }
+out:
+ niu_unlock_parent(np, flags);
+ return err;
+}
- return -ENODEV;
+static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
+{
+ if (class_code < CLASS_CODE_USER_PROG1 ||
+ class_code > CLASS_CODE_SCTP_IPV6)
+ return -EINVAL;
+
+ nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+ return 0;
}
-static int niu_mif_interrupt(struct niu *np)
+static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
- u64 mif_status = nr64(MIF_STATUS);
- int phy_mdint = 0;
+ if (class_code < CLASS_CODE_USER_PROG1 ||
+ class_code > CLASS_CODE_SCTP_IPV6)
+ return -EINVAL;
- if (np->flags & NIU_FLAGS_XMAC) {
- u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
+ nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+ return 0;
+}
- if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
- phy_mdint = 1;
- }
+/* Entries for the ports are interleaved in the TCAM */
+static u16 tcam_get_index(struct niu *np, u16 idx)
+{
+ /* One entry reserved for IP fragment rule */
+ if (idx >= (np->clas.tcam_sz - 1))
+ idx = 0;
+	return (np->clas.tcam_top + ((idx + 1) * np->parent->num_ports));
+}
- dev_err(np->device, PFX "%s: MIF interrupt, "
- "stat[%llx] phy_mdint(%d)\n",
- np->dev->name, (unsigned long long) mif_status, phy_mdint);
+static u16 tcam_get_size(struct niu *np)
+{
+ /* One entry reserved for IP fragment rule */
+ return np->clas.tcam_sz - 1;
+}
- return -ENODEV;
+static u16 tcam_get_valid_entry_cnt(struct niu *np)
+{
+ /* One entry reserved for IP fragment rule */
+ return np->clas.tcam_valid_entries - 1;
}
-static void niu_xmac_interrupt(struct niu *np)
+static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
+ u32 offset, u32 size)
{
- struct niu_xmac_stats *mp = &np->mac_stats.xmac;
- u64 val;
+ int i = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- val = nr64_mac(XTXMAC_STATUS);
- if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
- mp->tx_frames += TXMAC_FRM_CNT_COUNT;
- if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
- mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
- if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
- mp->tx_fifo_errors++;
- if (val & XTXMAC_STATUS_TXMAC_OFLOW)
- mp->tx_overflow_errors++;
- if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
- mp->tx_max_pkt_size_errors++;
- if (val & XTXMAC_STATUS_TXMAC_UFLOW)
- mp->tx_underflow_errors++;
+ frag->page = page;
+ frag->page_offset = offset;
+ frag->size = size;
- val = nr64_mac(XRXMAC_STATUS);
- if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
- mp->rx_local_faults++;
- if (val & XRXMAC_STATUS_RFLT_DET)
- mp->rx_remote_faults++;
- if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
- mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
- if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
- mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
- if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
- mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
- if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
- mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
- if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
- mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
- if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
- mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
- if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
- mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
- if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
- mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
- if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
- mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
- if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
- mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
- if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
- mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
- if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
- mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
- if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
- mp->rx_octets += RXMAC_BT_CNT_COUNT;
- if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
- mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
- if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
- mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
- if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
- mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
- if (val & XRXMAC_STATUS_RXUFLOW)
- mp->rx_underflows++;
- if (val & XRXMAC_STATUS_RXOFLOW)
- mp->rx_overflows++;
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
- val = nr64_mac(XMAC_FC_STAT);
- if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
- mp->pause_off_state++;
- if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
- mp->pause_on_state++;
- if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
- mp->pause_received++;
+ skb_shinfo(skb)->nr_frags = i + 1;
}
-static void niu_bmac_interrupt(struct niu *np)
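+/* Hash a DMA address into an rxhash bucket: drop the in-page offset,
+ * fold the upper page-number bits down, and mask to the ring size.
+ */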
+static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
- struct niu_bmac_stats *mp = &np->mac_stats.bmac;
- u64 val;
+ a >>= PAGE_SHIFT;
+ a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
- val = nr64_mac(BTXMAC_STATUS);
- if (val & BTXMAC_STATUS_UNDERRUN)
- mp->tx_underflow_errors++;
- if (val & BTXMAC_STATUS_MAX_PKT_ERR)
- mp->tx_max_pkt_size_errors++;
- if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
- mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
- if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
- mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
+ return (a & (MAX_RBR_RING_SIZE - 1));
+}
- val = nr64_mac(BRXMAC_STATUS);
- if (val & BRXMAC_STATUS_OVERFLOW)
- mp->rx_overflows++;
- if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
- mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
- if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
- mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
- if (val & BRXMAC_STATUS_CRC_ERR_EXP)
- mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
- if (val & BRXMAC_STATUS_LEN_ERR_EXP)
- mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
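+/* RX pages are chained through page->mapping (repurposed as a next
+ * pointer) and keyed by page->index, which holds the DMA base address.
+ */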
+static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
+ struct page ***link)
+{
+ unsigned int h = niu_hash_rxaddr(rp, addr);
+ struct page *p, **pp;
- val = nr64_mac(BMAC_CTRL_STATUS);
- if (val & BMAC_CTRL_STATUS_NOPAUSE)
- mp->pause_off_state++;
- if (val & BMAC_CTRL_STATUS_PAUSE)
- mp->pause_on_state++;
- if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
- mp->pause_received++;
+ addr &= PAGE_MASK;
+ pp = &rp->rxhash[h];
+ for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
+ if (p->index == addr) {
+ *link = pp;
+ break;
+ }
+ }
+
+ return p;
}
-static int niu_mac_interrupt(struct niu *np)
+static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
- if (np->flags & NIU_FLAGS_XMAC)
- niu_xmac_interrupt(np);
- else
- niu_bmac_interrupt(np);
+ unsigned int h = niu_hash_rxaddr(rp, base);
- return 0;
+ page->index = base;
+ page->mapping = (struct address_space *) rp->rxhash[h];
+ rp->rxhash[h] = page;
}
-static void niu_log_device_error(struct niu *np, u64 stat)
+static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
+ gfp_t mask, int start_index)
{
- dev_err(np->device, PFX "%s: Core device errors ( ",
- np->dev->name);
+ struct page *page;
+ u64 addr;
+ int i;
- if (stat & SYS_ERR_MASK_META2)
- printk("META2 ");
- if (stat & SYS_ERR_MASK_META1)
- printk("META1 ");
- if (stat & SYS_ERR_MASK_PEU)
- printk("PEU ");
- if (stat & SYS_ERR_MASK_TXC)
- printk("TXC ");
- if (stat & SYS_ERR_MASK_RDMC)
- printk("RDMC ");
- if (stat & SYS_ERR_MASK_TDMC)
- printk("TDMC ");
- if (stat & SYS_ERR_MASK_ZCP)
- printk("ZCP ");
- if (stat & SYS_ERR_MASK_FFLP)
- printk("FFLP ");
- if (stat & SYS_ERR_MASK_IPP)
- printk("IPP ");
- if (stat & SYS_ERR_MASK_MAC)
- printk("MAC ");
- if (stat & SYS_ERR_MASK_SMX)
- printk("SMX ");
+ page = alloc_page(mask);
+ if (!page)
+ return -ENOMEM;
- printk(")\n");
-}
+ addr = np->ops->map_page(np->device, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
-static int niu_device_error(struct niu *np)
-{
- u64 stat = nr64(SYS_ERR_STAT);
+ niu_hash_page(rp, page, addr);
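+	/* Take one page reference per RBR block sharing this page. */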
+ if (rp->rbr_blocks_per_page > 1)
+ atomic_add(rp->rbr_blocks_per_page - 1,
+ &compound_head(page)->_count);
- dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ for (i = 0; i < rp->rbr_blocks_per_page; i++) {
+ __le32 *rbr = &rp->rbr[start_index + i];
- niu_log_device_error(np, stat);
+ *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
+ addr += rp->rbr_block_size;
+ }
- return -ENODEV;
+ return 0;
}
-static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
- u64 v0, u64 v1, u64 v2)
+static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
+ int index = rp->rbr_index;
- int i, err = 0;
-
- lp->v0 = v0;
- lp->v1 = v1;
- lp->v2 = v2;
+ rp->rbr_pending++;
+ if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
+ int err = niu_rbr_add_page(np, rp, mask, index);
- if (v1 & 0x00000000ffffffffULL) {
- u32 rx_vec = (v1 & 0xffffffff);
+ if (unlikely(err)) {
+ rp->rbr_pending--;
+ return;
+ }
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ rp->rbr_index += rp->rbr_blocks_per_page;
+ BUG_ON(rp->rbr_index > rp->rbr_table_size);
+ if (rp->rbr_index == rp->rbr_table_size)
+ rp->rbr_index = 0;
- if (rx_vec & (1 << rp->rx_channel)) {
- int r = niu_rx_error(np, rp);
- if (r) {
- err = r;
- } else {
- if (!v0)
- nw64(RX_DMA_CTL_STAT(rp->rx_channel),
- RX_DMA_CTL_STAT_MEX);
- }
- }
+ if (rp->rbr_pending >= rp->rbr_kick_thresh) {
+ nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
+ rp->rbr_pending = 0;
}
}
- if (v1 & 0x7fffffff00000000ULL) {
- u32 tx_vec = (v1 >> 32) & 0x7fffffff;
+}
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
+{
+ unsigned int index = rp->rcr_index;
+ int num_rcr = 0;
- if (tx_vec & (1 << rp->tx_channel)) {
- int r = niu_tx_error(np, rp);
- if (r)
- err = r;
- }
- }
- }
- if ((v0 | v1) & 0x8000000000000000ULL) {
- int r = niu_mif_interrupt(np);
- if (r)
- err = r;
- }
- if (v2) {
- if (v2 & 0x01ef) {
- int r = niu_mac_interrupt(np);
- if (r)
- err = r;
- }
- if (v2 & 0x0210) {
- int r = niu_device_error(np);
- if (r)
- err = r;
+ rp->rx_dropped++;
+ while (1) {
+ struct page *page, **link;
+ u64 addr, val;
+ u32 rcr_size;
+
+ num_rcr++;
+
+ val = le64_to_cpup(&rp->rcr[index]);
+ addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+ page = niu_find_rxpage(rp, addr, &link);
+
+ rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
+ RCR_ENTRY_PKTBUFSZ_SHIFT];
+ if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ *link = (struct page *) page->mapping;
+ np->ops->unmap_page(np->device, page->index,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ page->index = 0;
+ page->mapping = NULL;
+ __free_page(page);
+ rp->rbr_refill_pending++;
}
- }
- if (err)
- niu_enable_interrupts(np, 0);
+ index = NEXT_RCR(rp, index);
+ if (!(val & RCR_ENTRY_MULTI))
+ break;
- return err;
+ }
+ rp->rcr_index = index;
+
+ return num_rcr;
}
-static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
- int ldn)
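+/* Build an skb for one received frame, appending each RCR buffer
+ * chunk to it; pages are handed back for refill once their last chunk
+ * has been used. Returns the number of RCR entries consumed.
+ */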
+static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ struct rx_ring_info *rp)
{
- struct rxdma_mailbox *mbox = rp->mbox;
- u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+ unsigned int index = rp->rcr_index;
+ struct sk_buff *skb;
+ int len, num_rcr;
- stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
- RX_DMA_CTL_STAT_RCRTO);
- nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
+ skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
+ if (unlikely(!skb))
+ return niu_rx_pkt_ignore(np, rp);
- niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
-}
+ num_rcr = 0;
+ while (1) {
+ struct page *page, **link;
+ u32 rcr_size, append_size;
+ u64 addr, val, off;
-static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
- int ldn)
-{
- rp->tx_cs = nr64(TX_CS(rp->tx_channel));
+ num_rcr++;
- niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
- np->dev->name, (unsigned long long) rp->tx_cs);
-}
+ val = le64_to_cpup(&rp->rcr[index]);
-static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
-{
- struct niu_parent *parent = np->parent;
- u32 rx_vec, tx_vec;
- int i;
+ len = (val & RCR_ENTRY_L2_LEN) >>
+ RCR_ENTRY_L2_LEN_SHIFT;
+ len -= ETH_FCS_LEN;
- tx_vec = (v0 >> 32);
- rx_vec = (v0 & 0xffffffff);
+ addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+ page = niu_find_rxpage(rp, addr, &link);
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
- int ldn = LDN_RXDMA(rp->rx_channel);
+ rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
+ RCR_ENTRY_PKTBUFSZ_SHIFT];
- if (parent->ldg_map[ldn] != ldg)
- continue;
+ off = addr & ~PAGE_MASK;
+ append_size = rcr_size;
+ if (num_rcr == 1) {
+ int ptype;
- nw64(LD_IM0(ldn), LD_IM0_MASK);
- if (rx_vec & (1 << rp->rx_channel))
- niu_rxchan_intr(np, rp, ldn);
- }
+ off += 2;
+ append_size -= 2;
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
- int ldn = LDN_TXDMA(rp->tx_channel);
+ ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
+ if ((ptype == RCR_PKT_TYPE_TCP ||
+ ptype == RCR_PKT_TYPE_UDP) &&
+ !(val & (RCR_ENTRY_NOPORT |
+ RCR_ENTRY_ERROR)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ if (!(val & RCR_ENTRY_MULTI))
+ append_size = len - skb->len;
- if (parent->ldg_map[ldn] != ldg)
- continue;
+ niu_rx_skb_append(skb, page, off, append_size);
+ if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ *link = (struct page *) page->mapping;
+ np->ops->unmap_page(np->device, page->index,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ page->index = 0;
+ page->mapping = NULL;
+ rp->rbr_refill_pending++;
+ } else
+ get_page(page);
- nw64(LD_IM0(ldn), LD_IM0_MASK);
- if (tx_vec & (1 << rp->tx_channel))
- niu_txchan_intr(np, rp, ldn);
- }
-}
+ index = NEXT_RCR(rp, index);
+ if (!(val & RCR_ENTRY_MULTI))
+ break;
-static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
- u64 v0, u64 v1, u64 v2)
-{
- if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
- lp->v0 = v0;
- lp->v1 = v1;
- lp->v2 = v2;
- __niu_fastpath_interrupt(np, lp->ldg_num, v0);
- __netif_rx_schedule(np->dev, &lp->napi);
}
+ rp->rcr_index = index;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
+
+ rp->rx_packets++;
+ rp->rx_bytes += skb->len;
+
+ skb->protocol = eth_type_trans(skb, np->dev);
+ skb_record_rx_queue(skb, rp->rx_channel);
+ napi_gro_receive(napi, skb);
+
+ return num_rcr;
}
-static irqreturn_t niu_interrupt(int irq, void *dev_id)
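+/* Prepopulate the RBR, adding one page per rbr_blocks_per_page
+ * entries until no room remains for another full page of blocks.
+ */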
+static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
- struct niu_ldg *lp = dev_id;
- struct niu *np = lp->np;
- int ldg = lp->ldg_num;
- unsigned long flags;
- u64 v0, v1, v2;
+ int blocks_per_page = rp->rbr_blocks_per_page;
+ int err, index = rp->rbr_index;
- if (netif_msg_intr(np))
- printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
- lp, ldg);
+ err = 0;
+ while (index < (rp->rbr_table_size - blocks_per_page)) {
+ err = niu_rbr_add_page(np, rp, mask, index);
+ if (err)
+ break;
- spin_lock_irqsave(&np->lock, flags);
+ index += blocks_per_page;
+ }
- v0 = nr64(LDSV0(ldg));
- v1 = nr64(LDSV1(ldg));
- v2 = nr64(LDSV2(ldg));
+ rp->rbr_index = index;
+ return err;
+}
- if (netif_msg_intr(np))
- printk("v0[%llx] v1[%llx] v2[%llx]\n",
- (unsigned long long) v0,
- (unsigned long long) v1,
- (unsigned long long) v2);
+static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
+{
+ int i;
- if (unlikely(!v0 && !v1 && !v2)) {
- spin_unlock_irqrestore(&np->lock, flags);
- return IRQ_NONE;
- }
+ for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
+ struct page *page;
- if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
- int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
- if (err)
- goto out;
+ page = rp->rxhash[i];
+ while (page) {
+ struct page *next = (struct page *) page->mapping;
+ u64 base = page->index;
+
+ np->ops->unmap_page(np->device, base, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ page->index = 0;
+ page->mapping = NULL;
+
+ __free_page(page);
+
+ page = next;
+ }
}
- if (likely(v0 & ~((u64)1 << LDN_MIF)))
- niu_schedule_napi(np, lp, v0, v1, v2);
- else
- niu_ldg_rearm(np, lp, 1);
-out:
- spin_unlock_irqrestore(&np->lock, flags);
- return IRQ_HANDLED;
+ for (i = 0; i < rp->rbr_table_size; i++)
+ rp->rbr[i] = cpu_to_le32(0);
+ rp->rbr_index = 0;
}
-static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
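+/* Unmap and free one completed TX skb, stepping past the extra
+ * descriptors used by a long linear area and by page fragments.
+ * Returns the descriptor index just past this packet.
+ */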
+static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
- if (rp->mbox) {
- np->ops->free_coherent(np->device,
- sizeof(struct rxdma_mailbox),
- rp->mbox, rp->mbox_dma);
- rp->mbox = NULL;
- }
- if (rp->rcr) {
- np->ops->free_coherent(np->device,
- MAX_RCR_RING_SIZE * sizeof(__le64),
- rp->rcr, rp->rcr_dma);
- rp->rcr = NULL;
- rp->rcr_table_size = 0;
- rp->rcr_index = 0;
- }
- if (rp->rbr) {
- niu_rbr_free(np, rp);
+ struct tx_buff_info *tb = &rp->tx_buffs[idx];
+ struct sk_buff *skb = tb->skb;
+ struct tx_pkt_hdr *tp;
+ u64 tx_flags;
+ int i, len;
- np->ops->free_coherent(np->device,
- MAX_RBR_RING_SIZE * sizeof(__le32),
- rp->rbr, rp->rbr_dma);
- rp->rbr = NULL;
- rp->rbr_table_size = 0;
- rp->rbr_index = 0;
+ tp = (struct tx_pkt_hdr *) skb->data;
+ tx_flags = le64_to_cpup(&tp->flags);
+
+ rp->tx_packets++;
+ rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
+ ((tx_flags & TXHDR_PAD) / 2));
+
+ len = skb_headlen(skb);
+ np->ops->unmap_single(np->device, tb->mapping,
+ len, DMA_TO_DEVICE);
+
+ if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
+ rp->mark_pending--;
+
+ tb->skb = NULL;
+ do {
+ idx = NEXT_TX(rp, idx);
+ len -= MAX_TX_DESC_LEN;
+ } while (len > 0);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ tb = &rp->tx_buffs[idx];
+ BUG_ON(tb->skb != NULL);
+ np->ops->unmap_page(np->device, tb->mapping,
+ skb_shinfo(skb)->frags[i].size,
+ DMA_TO_DEVICE);
+ idx = NEXT_TX(rp, idx);
}
- kfree(rp->rxhash);
- rp->rxhash = NULL;
+
+ dev_kfree_skb(skb);
+
+ return idx;
}
-static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
+#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
+
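+/* Reclaim completed TX descriptors using the hardware packet counter
+ * in TX_CS, then wake the queue once enough ring space is free again.
+ */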
+static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
- if (rp->mbox) {
- np->ops->free_coherent(np->device,
- sizeof(struct txdma_mailbox),
- rp->mbox, rp->mbox_dma);
- rp->mbox = NULL;
- }
- if (rp->descr) {
- int i;
+ struct netdev_queue *txq;
+ u16 pkt_cnt, tmp;
+ int cons, index;
+ u64 cs;
- for (i = 0; i < MAX_TX_RING_SIZE; i++) {
- if (rp->tx_buffs[i].skb)
- (void) release_tx_packet(np, rp, i);
- }
+ index = (rp - np->tx_rings);
+ txq = netdev_get_tx_queue(np->dev, index);
- np->ops->free_coherent(np->device,
- MAX_TX_RING_SIZE * sizeof(__le64),
- rp->descr, rp->descr_dma);
- rp->descr = NULL;
- rp->pending = 0;
- rp->prod = 0;
- rp->cons = 0;
- rp->wrap_bit = 0;
+ cs = rp->tx_cs;
+ if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
+ goto out;
+
+ tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
+ pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
+ (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
+
+ rp->last_pkt_cnt = tmp;
+
+ cons = rp->cons;
+
+ niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
+ np->dev->name, pkt_cnt, cons);
+
+ while (pkt_cnt--)
+ cons = release_tx_packet(np, rp, cons);
+
+ rp->cons = cons;
+ smp_mb();
+
+out:
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
}
-static void niu_free_channels(struct niu *np)
+static inline void niu_sync_rx_discard_stats(struct niu *np,
+ struct rx_ring_info *rp,
+ const int limit)
{
- int i;
+ /* This elaborate scheme is needed for reading the RX discard
+ * counters, as they are only 16-bit and can overflow quickly,
+ * and because the overflow indication bit is not usable as
+ * the counter value does not wrap, but remains at max value
+ * 0xFFFF.
+ *
+ * In theory and in practice counters can be lost in between
+ * reading nr64() and clearing the counter nw64(). For this
+ * reason, the number of counter clearings nw64() is
+ * limited/reduced through the limit parameter.
+ */
+ int rx_channel = rp->rx_channel;
+ u32 misc, wred;
- if (np->rx_rings) {
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ /* RXMISC (Receive Miscellaneous Discard Count), covers the
+ * following discard events: IPP (Input Port Process),
+ * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
+ * Block Ring) prefetch buffer is empty.
+ */
+ misc = nr64(RXMISC(rx_channel));
+ if (unlikely((misc & RXMISC_COUNT) > limit)) {
+ nw64(RXMISC(rx_channel), 0);
+ rp->rx_errors += misc & RXMISC_COUNT;
- niu_free_rx_ring_info(np, rp);
- }
- kfree(np->rx_rings);
- np->rx_rings = NULL;
- np->num_rx_rings = 0;
+ if (unlikely(misc & RXMISC_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow "
+ "RXMISC discard\n", rx_channel);
+
+ niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
+ np->dev->name, rx_channel, misc, misc-limit);
}
- if (np->tx_rings) {
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ /* WRED (Weighted Random Early Discard) by hardware */
+ wred = nr64(RED_DIS_CNT(rx_channel));
+ if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
+ nw64(RED_DIS_CNT(rx_channel), 0);
+ rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
- niu_free_tx_ring_info(np, rp);
- }
- kfree(np->tx_rings);
- np->tx_rings = NULL;
- np->num_tx_rings = 0;
+ if (unlikely(wred & RED_DIS_CNT_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow "
+ "WRED discard\n", rx_channel);
+
+ niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
+ np->dev->name, rx_channel, wred, wred-limit);
}
}
-static int niu_alloc_rx_ring_info(struct niu *np,
- struct rx_ring_info *rp)
+static int niu_rx_work(struct napi_struct *napi, struct niu *np,
+ struct rx_ring_info *rp, int budget)
{
- BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
+ int qlen, rcr_done = 0, work_done = 0;
+ struct rxdma_mailbox *mbox = rp->mbox;
+ u64 stat;
- rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
- GFP_KERNEL);
- if (!rp->rxhash)
- return -ENOMEM;
+#if 1
+ stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
+ qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
+#else
+ stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+ qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
+#endif
+ mbox->rx_dma_ctl_stat = 0;
+ mbox->rcrstat_a = 0;
- rp->mbox = np->ops->alloc_coherent(np->device,
- sizeof(struct rxdma_mailbox),
- &rp->mbox_dma, GFP_KERNEL);
- if (!rp->mbox)
- return -ENOMEM;
- if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
- return -EINVAL;
+ niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
+ np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
+
+ rcr_done = work_done = 0;
+ qlen = min(qlen, budget);
+ while (work_done < qlen) {
+ rcr_done += niu_process_rx_pkt(napi, np, rp);
+ work_done++;
}
- rp->rcr = np->ops->alloc_coherent(np->device,
- MAX_RCR_RING_SIZE * sizeof(__le64),
- &rp->rcr_dma, GFP_KERNEL);
- if (!rp->rcr)
- return -ENOMEM;
- if ((unsigned long)rp->rcr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
- return -EINVAL;
+ if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
+ unsigned int i;
+
+ for (i = 0; i < rp->rbr_refill_pending; i++)
+ niu_rbr_refill(np, rp, GFP_ATOMIC);
+ rp->rbr_refill_pending = 0;
}
- rp->rcr_table_size = MAX_RCR_RING_SIZE;
- rp->rcr_index = 0;
- rp->rbr = np->ops->alloc_coherent(np->device,
- MAX_RBR_RING_SIZE * sizeof(__le32),
- &rp->rbr_dma, GFP_KERNEL);
- if (!rp->rbr)
+ stat = (RX_DMA_CTL_STAT_MEX |
+ ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
+ ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
+
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
+
+ /* Only sync discard stats when qlen indicates potential for drops */
+ if (qlen > 10)
+ niu_sync_rx_discard_stats(np, rp, 0x7FFF);
+
+ return work_done;
+}
+
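+/* NAPI core: run TX reclaim and RX processing for every channel
+ * flagged in the saved LDSV0 vector, re-enabling each channel's
+ * logical device interrupt afterwards.
+ */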
+static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
+{
+ u64 v0 = lp->v0;
+ u32 tx_vec = (v0 >> 32);
+ u32 rx_vec = (v0 & 0xffffffff);
+ int i, work_done = 0;
+
+ niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
+ np->dev->name, (unsigned long long) v0);
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+ if (tx_vec & (1 << rp->tx_channel))
+ niu_tx_work(np, rp);
+ nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
+ }
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ if (rx_vec & (1 << rp->rx_channel)) {
+ int this_work_done;
+
+ this_work_done = niu_rx_work(&lp->napi, np, rp,
+ budget);
+
+ budget -= this_work_done;
+ work_done += this_work_done;
+ }
+ nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
+ }
+
+ return work_done;
+}
+
+static int niu_poll(struct napi_struct *napi, int budget)
+{
+ struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
+ struct niu *np = lp->np;
+ int work_done;
+
+ work_done = niu_poll_core(np, lp, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ niu_ldg_rearm(np, lp, 1);
+ }
+ return work_done;
+}
+
+static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
+ u64 stat)
+{
+ dev_err(np->device, PFX "%s: RX channel %u errors ( ",
+ np->dev->name, rp->rx_channel);
+
+ if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
+ printk("RBR_TMOUT ");
+ if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
+ printk("RSP_CNT ");
+ if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
+ printk("BYTE_EN_BUS ");
+ if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
+ printk("RSP_DAT ");
+ if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
+ printk("RCR_ACK ");
+ if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
+ printk("RCR_SHA_PAR ");
+ if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
+ printk("RBR_PRE_PAR ");
+ if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
+ printk("CONFIG ");
+ if (stat & RX_DMA_CTL_STAT_RCRINCON)
+ printk("RCRINCON ");
+ if (stat & RX_DMA_CTL_STAT_RCRFULL)
+ printk("RCRFULL ");
+ if (stat & RX_DMA_CTL_STAT_RBRFULL)
+ printk("RBRFULL ");
+ if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
+ printk("RBRLOGPAGE ");
+ if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
+ printk("CFIGLOGPAGE ");
+ if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
+ printk("DC_FIDO ");
+
+ printk(")\n");
+}
+
+static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
+{
+ u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
+ int err = 0;
+
+ if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
+ RX_DMA_CTL_STAT_PORT_FATAL))
+ err = -EINVAL;
+
+ if (err) {
+ dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
+ np->dev->name, rp->rx_channel,
+ (unsigned long long) stat);
+
+ niu_log_rxchan_errors(np, rp, stat);
+ }
+
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+ stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
+
+ return err;
+}
+
+static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
+ u64 cs)
+{
+ dev_err(np->device, PFX "%s: TX channel %u errors ( ",
+ np->dev->name, rp->tx_channel);
+
+ if (cs & TX_CS_MBOX_ERR)
+ printk("MBOX ");
+ if (cs & TX_CS_PKT_SIZE_ERR)
+ printk("PKT_SIZE ");
+ if (cs & TX_CS_TX_RING_OFLOW)
+ printk("TX_RING_OFLOW ");
+ if (cs & TX_CS_PREF_BUF_PAR_ERR)
+ printk("PREF_BUF_PAR ");
+ if (cs & TX_CS_NACK_PREF)
+ printk("NACK_PREF ");
+ if (cs & TX_CS_NACK_PKT_RD)
+ printk("NACK_PKT_RD ");
+ if (cs & TX_CS_CONF_PART_ERR)
+ printk("CONF_PART ");
+ if (cs & TX_CS_PKT_PRT_ERR)
+ printk("PKT_PTR ");
+
+ printk(")\n");
+}
+
+static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
+{
+ u64 cs, logh, logl;
+
+ cs = nr64(TX_CS(rp->tx_channel));
+ logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
+ logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
+
+ dev_err(np->device, PFX "%s: TX channel %u error, "
+ "cs[%llx] logh[%llx] logl[%llx]\n",
+ np->dev->name, rp->tx_channel,
+ (unsigned long long) cs,
+ (unsigned long long) logh,
+ (unsigned long long) logl);
+
+ niu_log_txchan_errors(np, rp, cs);
+
+ return -ENODEV;
+}
+
+static int niu_mif_interrupt(struct niu *np)
+{
+ u64 mif_status = nr64(MIF_STATUS);
+ int phy_mdint = 0;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
+
+ if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
+ phy_mdint = 1;
+ }
+
+ dev_err(np->device, PFX "%s: MIF interrupt, "
+ "stat[%llx] phy_mdint(%d)\n",
+ np->dev->name, (unsigned long long) mif_status, phy_mdint);
+
+ return -ENODEV;
+}
+
+static void niu_xmac_interrupt(struct niu *np)
+{
+ struct niu_xmac_stats *mp = &np->mac_stats.xmac;
+ u64 val;
+
+ val = nr64_mac(XTXMAC_STATUS);
+ if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
+ mp->tx_frames += TXMAC_FRM_CNT_COUNT;
+ if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
+ mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
+ if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
+ mp->tx_fifo_errors++;
+ if (val & XTXMAC_STATUS_TXMAC_OFLOW)
+ mp->tx_overflow_errors++;
+ if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
+ mp->tx_max_pkt_size_errors++;
+ if (val & XTXMAC_STATUS_TXMAC_UFLOW)
+ mp->tx_underflow_errors++;
+
+ val = nr64_mac(XRXMAC_STATUS);
+ if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
+ mp->rx_local_faults++;
+ if (val & XRXMAC_STATUS_RFLT_DET)
+ mp->rx_remote_faults++;
+ if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
+ mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
+ if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
+ mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
+ mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
+ mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
+ mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
+ mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
+ mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
+ mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
+ mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
+ mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
+ mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
+ mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
+ if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
+ mp->rx_octets += RXMAC_BT_CNT_COUNT;
+ if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
+ mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
+ if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
+ mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
+ if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
+ mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXUFLOW)
+ mp->rx_underflows++;
+ if (val & XRXMAC_STATUS_RXOFLOW)
+ mp->rx_overflows++;
+
+ val = nr64_mac(XMAC_FC_STAT);
+ if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
+ mp->pause_off_state++;
+ if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
+ mp->pause_on_state++;
+ if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
+ mp->pause_received++;
+}
+
+static void niu_bmac_interrupt(struct niu *np)
+{
+ struct niu_bmac_stats *mp = &np->mac_stats.bmac;
+ u64 val;
+
+ val = nr64_mac(BTXMAC_STATUS);
+ if (val & BTXMAC_STATUS_UNDERRUN)
+ mp->tx_underflow_errors++;
+ if (val & BTXMAC_STATUS_MAX_PKT_ERR)
+ mp->tx_max_pkt_size_errors++;
+ if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
+ mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
+ if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
+ mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
+
+ val = nr64_mac(BRXMAC_STATUS);
+ if (val & BRXMAC_STATUS_OVERFLOW)
+ mp->rx_overflows++;
+ if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
+ mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
+ if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
+ mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
+ if (val & BRXMAC_STATUS_CRC_ERR_EXP)
+ mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
+ if (val & BRXMAC_STATUS_LEN_ERR_EXP)
+ mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
+
+ val = nr64_mac(BMAC_CTRL_STATUS);
+ if (val & BMAC_CTRL_STATUS_NOPAUSE)
+ mp->pause_off_state++;
+ if (val & BMAC_CTRL_STATUS_PAUSE)
+ mp->pause_on_state++;
+ if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
+ mp->pause_received++;
+}
+
+static int niu_mac_interrupt(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_xmac_interrupt(np);
+ else
+ niu_bmac_interrupt(np);
+
+ return 0;
+}
+
+static void niu_log_device_error(struct niu *np, u64 stat)
+{
+ dev_err(np->device, PFX "%s: Core device errors ( ",
+ np->dev->name);
+
+ if (stat & SYS_ERR_MASK_META2)
+ printk("META2 ");
+ if (stat & SYS_ERR_MASK_META1)
+ printk("META1 ");
+ if (stat & SYS_ERR_MASK_PEU)
+ printk("PEU ");
+ if (stat & SYS_ERR_MASK_TXC)
+ printk("TXC ");
+ if (stat & SYS_ERR_MASK_RDMC)
+ printk("RDMC ");
+ if (stat & SYS_ERR_MASK_TDMC)
+ printk("TDMC ");
+ if (stat & SYS_ERR_MASK_ZCP)
+ printk("ZCP ");
+ if (stat & SYS_ERR_MASK_FFLP)
+ printk("FFLP ");
+ if (stat & SYS_ERR_MASK_IPP)
+ printk("IPP ");
+ if (stat & SYS_ERR_MASK_MAC)
+ printk("MAC ");
+ if (stat & SYS_ERR_MASK_SMX)
+ printk("SMX ");
+
+ printk(")\n");
+}
+
+static int niu_device_error(struct niu *np)
+{
+ u64 stat = nr64(SYS_ERR_STAT);
+
+ dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
+ np->dev->name, (unsigned long long) stat);
+
+ niu_log_device_error(np, stat);
+
+ return -ENODEV;
+}
+
+static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
+ u64 v0, u64 v1, u64 v2)
+{
+ int i, err = 0;
+
+ lp->v0 = v0;
+ lp->v1 = v1;
+ lp->v2 = v2;
+
+ if (v1 & 0x00000000ffffffffULL) {
+ u32 rx_vec = (v1 & 0xffffffff);
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ if (rx_vec & (1 << rp->rx_channel)) {
+ int r = niu_rx_error(np, rp);
+ if (r) {
+ err = r;
+ } else {
+ if (!v0)
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+ RX_DMA_CTL_STAT_MEX);
+ }
+ }
+ }
+ }
+ if (v1 & 0x7fffffff00000000ULL) {
+ u32 tx_vec = (v1 >> 32) & 0x7fffffff;
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ if (tx_vec & (1 << rp->tx_channel)) {
+ int r = niu_tx_error(np, rp);
+ if (r)
+ err = r;
+ }
+ }
+ }
+ if ((v0 | v1) & 0x8000000000000000ULL) {
+ int r = niu_mif_interrupt(np);
+ if (r)
+ err = r;
+ }
+ if (v2) {
+ if (v2 & 0x01ef) {
+ int r = niu_mac_interrupt(np);
+ if (r)
+ err = r;
+ }
+ if (v2 & 0x0210) {
+ int r = niu_device_error(np);
+ if (r)
+ err = r;
+ }
+ }
+
+ if (err)
+ niu_enable_interrupts(np, 0);
+
+ return err;
+}
+
+static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
+ int ldn)
+{
+ struct rxdma_mailbox *mbox = rp->mbox;
+ u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+
+ stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
+ RX_DMA_CTL_STAT_RCRTO);
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
+
+ niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
+ np->dev->name, (unsigned long long) stat);
+}
+
+static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
+ int ldn)
+{
+ rp->tx_cs = nr64(TX_CS(rp->tx_channel));
+
+ niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
+ np->dev->name, (unsigned long long) rp->tx_cs);
+}
+
+static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
+{
+ struct niu_parent *parent = np->parent;
+ u32 rx_vec, tx_vec;
+ int i;
+
+ tx_vec = (v0 >> 32);
+ rx_vec = (v0 & 0xffffffff);
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+ int ldn = LDN_RXDMA(rp->rx_channel);
+
+ if (parent->ldg_map[ldn] != ldg)
+ continue;
+
+ nw64(LD_IM0(ldn), LD_IM0_MASK);
+ if (rx_vec & (1 << rp->rx_channel))
+ niu_rxchan_intr(np, rp, ldn);
+ }
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+ int ldn = LDN_TXDMA(rp->tx_channel);
+
+ if (parent->ldg_map[ldn] != ldg)
+ continue;
+
+ nw64(LD_IM0(ldn), LD_IM0_MASK);
+ if (tx_vec & (1 << rp->tx_channel))
+ niu_txchan_intr(np, rp, ldn);
+ }
+}
+
+static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
+ u64 v0, u64 v1, u64 v2)
+{
+ if (likely(napi_schedule_prep(&lp->napi))) {
+ lp->v0 = v0;
+ lp->v1 = v1;
+ lp->v2 = v2;
+ __niu_fastpath_interrupt(np, lp->ldg_num, v0);
+ __napi_schedule(&lp->napi);
+ }
+}
+
+static irqreturn_t niu_interrupt(int irq, void *dev_id)
+{
+ struct niu_ldg *lp = dev_id;
+ struct niu *np = lp->np;
+ int ldg = lp->ldg_num;
+ unsigned long flags;
+ u64 v0, v1, v2;
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
+ lp, ldg);
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ v0 = nr64(LDSV0(ldg));
+ v1 = nr64(LDSV1(ldg));
+ v2 = nr64(LDSV2(ldg));
+
+ if (netif_msg_intr(np))
+ printk("v0[%llx] v1[%llx] v2[%llx]\n",
+ (unsigned long long) v0,
+ (unsigned long long) v1,
+ (unsigned long long) v2);
+
+ if (unlikely(!v0 && !v1 && !v2)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
+ int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
+ if (err)
+ goto out;
+ }
+ if (likely(v0 & ~((u64)1 << LDN_MIF)))
+ niu_schedule_napi(np, lp, v0, v1, v2);
+ else
+ niu_ldg_rearm(np, lp, 1);
+out:
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
+{
+ if (rp->mbox) {
+ np->ops->free_coherent(np->device,
+ sizeof(struct rxdma_mailbox),
+ rp->mbox, rp->mbox_dma);
+ rp->mbox = NULL;
+ }
+ if (rp->rcr) {
+ np->ops->free_coherent(np->device,
+ MAX_RCR_RING_SIZE * sizeof(__le64),
+ rp->rcr, rp->rcr_dma);
+ rp->rcr = NULL;
+ rp->rcr_table_size = 0;
+ rp->rcr_index = 0;
+ }
+ if (rp->rbr) {
+ niu_rbr_free(np, rp);
+
+ np->ops->free_coherent(np->device,
+ MAX_RBR_RING_SIZE * sizeof(__le32),
+ rp->rbr, rp->rbr_dma);
+ rp->rbr = NULL;
+ rp->rbr_table_size = 0;
+ rp->rbr_index = 0;
+ }
+ kfree(rp->rxhash);
+ rp->rxhash = NULL;
+}
+
+static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
+{
+ if (rp->mbox) {
+ np->ops->free_coherent(np->device,
+ sizeof(struct txdma_mailbox),
+ rp->mbox, rp->mbox_dma);
+ rp->mbox = NULL;
+ }
+ if (rp->descr) {
+ int i;
+
+ for (i = 0; i < MAX_TX_RING_SIZE; i++) {
+ if (rp->tx_buffs[i].skb)
+ (void) release_tx_packet(np, rp, i);
+ }
+
+ np->ops->free_coherent(np->device,
+ MAX_TX_RING_SIZE * sizeof(__le64),
+ rp->descr, rp->descr_dma);
+ rp->descr = NULL;
+ rp->pending = 0;
+ rp->prod = 0;
+ rp->cons = 0;
+ rp->wrap_bit = 0;
+ }
+}
+
+static void niu_free_channels(struct niu *np)
+{
+ int i;
+
+ if (np->rx_rings) {
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ niu_free_rx_ring_info(np, rp);
+ }
+ kfree(np->rx_rings);
+ np->rx_rings = NULL;
+ np->num_rx_rings = 0;
+ }
+
+ if (np->tx_rings) {
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ niu_free_tx_ring_info(np, rp);
+ }
+ kfree(np->tx_rings);
+ np->tx_rings = NULL;
+ np->num_tx_rings = 0;
+ }
+}
+
+static int niu_alloc_rx_ring_info(struct niu *np,
+ struct rx_ring_info *rp)
+{
+ BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
+
+ rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!rp->rxhash)
+ return -ENOMEM;
+
+ rp->mbox = np->ops->alloc_coherent(np->device,
+ sizeof(struct rxdma_mailbox),
+ &rp->mbox_dma, GFP_KERNEL);
+ if (!rp->mbox)
+ return -ENOMEM;
+ if ((unsigned long)rp->mbox & (64UL - 1)) {
+ dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
+ "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ return -EINVAL;
+ }
+
+ rp->rcr = np->ops->alloc_coherent(np->device,
+ MAX_RCR_RING_SIZE * sizeof(__le64),
+ &rp->rcr_dma, GFP_KERNEL);
+ if (!rp->rcr)
+ return -ENOMEM;
+ if ((unsigned long)rp->rcr & (64UL - 1)) {
+ dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
+ "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
+ return -EINVAL;
+ }
+ rp->rcr_table_size = MAX_RCR_RING_SIZE;
+ rp->rcr_index = 0;
+
+ rp->rbr = np->ops->alloc_coherent(np->device,
+ MAX_RBR_RING_SIZE * sizeof(__le32),
+ &rp->rbr_dma, GFP_KERNEL);
+ if (!rp->rbr)
return -ENOMEM;
if ((unsigned long)rp->rbr & (64UL - 1)) {
dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
"RXDMA RBR table %p\n", np->dev->name, rp->rbr);
return -EINVAL;
}
- rp->rbr_table_size = MAX_RBR_RING_SIZE;
- rp->rbr_index = 0;
- rp->rbr_pending = 0;
+ rp->rbr_table_size = MAX_RBR_RING_SIZE;
+ rp->rbr_index = 0;
+ rp->rbr_pending = 0;
+
+ return 0;
+}
+
+static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
+{
+ int mtu = np->dev->mtu;
+
+ /* These values are recommended by the HW designers for fair
+ * utilization of DRR amongst the rings.
+ */
+ rp->max_burst = mtu + 32;
+ if (rp->max_burst > 4096)
+ rp->max_burst = 4096;
+}
+
+static int niu_alloc_tx_ring_info(struct niu *np,
+ struct tx_ring_info *rp)
+{
+ BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
+
+ rp->mbox = np->ops->alloc_coherent(np->device,
+ sizeof(struct txdma_mailbox),
+ &rp->mbox_dma, GFP_KERNEL);
+ if (!rp->mbox)
+ return -ENOMEM;
+ if ((unsigned long)rp->mbox & (64UL - 1)) {
+ dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
+ "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ return -EINVAL;
+ }
+
+ rp->descr = np->ops->alloc_coherent(np->device,
+ MAX_TX_RING_SIZE * sizeof(__le64),
+ &rp->descr_dma, GFP_KERNEL);
+ if (!rp->descr)
+ return -ENOMEM;
+ if ((unsigned long)rp->descr & (64UL - 1)) {
+ dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
+ "TXDMA descr table %p\n", np->dev->name, rp->descr);
+ return -EINVAL;
+ }
+
+ rp->pending = MAX_TX_RING_SIZE;
+ rp->prod = 0;
+ rp->cons = 0;
+ rp->wrap_bit = 0;
+
+ /* XXX make these configurable... XXX */
+ rp->mark_freq = rp->pending / 4;
+
+ niu_set_max_burst(np, rp);
+
+ return 0;
+}
+
+static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
+{
+ u16 bss;
+
+ bss = min(PAGE_SHIFT, 15);
+
+ rp->rbr_block_size = 1 << bss;
+ rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
+
+ rp->rbr_sizes[0] = 256;
+ rp->rbr_sizes[1] = 1024;
+ if (np->dev->mtu > ETH_DATA_LEN) {
+ switch (PAGE_SIZE) {
+ case 4 * 1024:
+ rp->rbr_sizes[2] = 4096;
+ break;
+
+ default:
+ rp->rbr_sizes[2] = 8192;
+ break;
+ }
+ } else {
+ rp->rbr_sizes[2] = 2048;
+ }
+ rp->rbr_sizes[3] = rp->rbr_block_size;
+}
+
+static int niu_alloc_channels(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ int first_rx_channel, first_tx_channel;
+ int i, port, err;
+
+ port = np->port;
+ first_rx_channel = first_tx_channel = 0;
+ for (i = 0; i < port; i++) {
+ first_rx_channel += parent->rxchan_per_port[i];
+ first_tx_channel += parent->txchan_per_port[i];
+ }
+
+ np->num_rx_rings = parent->rxchan_per_port[port];
+ np->num_tx_rings = parent->txchan_per_port[port];
+
+ np->dev->real_num_tx_queues = np->num_tx_rings;
+
+ np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np->rx_rings)
+ goto out_err;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ rp->np = np;
+ rp->rx_channel = first_rx_channel + i;
+
+ err = niu_alloc_rx_ring_info(np, rp);
+ if (err)
+ goto out_err;
+
+ niu_size_rbr(np, rp);
+
+ /* XXX better defaults, configurable, etc... XXX */
+ rp->nonsyn_window = 64;
+ rp->nonsyn_threshold = rp->rcr_table_size - 64;
+ rp->syn_window = 64;
+ rp->syn_threshold = rp->rcr_table_size - 64;
+ rp->rcr_pkt_threshold = 16;
+ rp->rcr_timeout = 8;
+ rp->rbr_kick_thresh = RBR_REFILL_MIN;
+ if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
+ rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
+
+ err = niu_rbr_fill(np, rp, GFP_KERNEL);
+ if (err)
+ return err;
+ }
+
+ np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np->tx_rings)
+ goto out_err;
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ rp->np = np;
+ rp->tx_channel = first_tx_channel + i;
+
+ err = niu_alloc_tx_ring_info(np, rp);
+ if (err)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ niu_free_channels(np);
+ return err;
+}
+
+static int niu_tx_cs_sng_poll(struct niu *np, int channel)
+{
+ int limit = 1000;
+
+ while (--limit > 0) {
+ u64 val = nr64(TX_CS(channel));
+ if (val & TX_CS_SNG_STATE)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int niu_tx_channel_stop(struct niu *np, int channel)
+{
+ u64 val = nr64(TX_CS(channel));
+
+ val |= TX_CS_STOP_N_GO;
+ nw64(TX_CS(channel), val);
+
+ return niu_tx_cs_sng_poll(np, channel);
+}
+
+static int niu_tx_cs_reset_poll(struct niu *np, int channel)
+{
+ int limit = 1000;
+
+ while (--limit > 0) {
+ u64 val = nr64(TX_CS(channel));
+ if (!(val & TX_CS_RST))
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int niu_tx_channel_reset(struct niu *np, int channel)
+{
+ u64 val = nr64(TX_CS(channel));
+ int err;
+
+ val |= TX_CS_RST;
+ nw64(TX_CS(channel), val);
+
+ err = niu_tx_cs_reset_poll(np, channel);
+ if (!err)
+ nw64(TX_RING_KICK(channel), 0);
+
+ return err;
+}
+
+static int niu_tx_channel_lpage_init(struct niu *np, int channel)
+{
+ u64 val;
+
+ nw64(TX_LOG_MASK1(channel), 0);
+ nw64(TX_LOG_VAL1(channel), 0);
+ nw64(TX_LOG_MASK2(channel), 0);
+ nw64(TX_LOG_VAL2(channel), 0);
+ nw64(TX_LOG_PAGE_RELO1(channel), 0);
+ nw64(TX_LOG_PAGE_RELO2(channel), 0);
+ nw64(TX_LOG_PAGE_HDL(channel), 0);
+
+ val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
+ val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
+ nw64(TX_LOG_PAGE_VLD(channel), val);
+
+ /* XXX TXDMA 32bit mode? XXX */
+
+ return 0;
+}
+
+static void niu_txc_enable_port(struct niu *np, int on)
+{
+ unsigned long flags;
+ u64 val, mask;
+
+ niu_lock_parent(np, flags);
+ val = nr64(TXC_CONTROL);
+ mask = (u64)1 << np->port;
+ if (on) {
+ val |= TXC_CONTROL_ENABLE | mask;
+ } else {
+ val &= ~mask;
+ if ((val & ~TXC_CONTROL_ENABLE) == 0)
+ val &= ~TXC_CONTROL_ENABLE;
+ }
+ nw64(TXC_CONTROL, val);
+ niu_unlock_parent(np, flags);
+}
+
+static void niu_txc_set_imask(struct niu *np, u64 imask)
+{
+ unsigned long flags;
+ u64 val;
+
+ niu_lock_parent(np, flags);
+ val = nr64(TXC_INT_MASK);
+ val &= ~TXC_INT_MASK_VAL(np->port);
+ val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
+ nw64(TXC_INT_MASK, val);
+ niu_unlock_parent(np, flags);
+}
+
+static void niu_txc_port_dma_enable(struct niu *np, int on)
+{
+ u64 val = 0;
+
+ if (on) {
+ int i;
+
+ for (i = 0; i < np->num_tx_rings; i++)
+ val |= (1 << np->tx_rings[i].tx_channel);
+ }
+ nw64(TXC_PORT_DMA(np->port), val);
+}
+
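+/* Bring one TX DMA channel into a clean state and program it: stop,
+ * reset, logical pages, burst limit, ring base and length, and the
+ * mailbox address.
+ */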
+static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+ int err, channel = rp->tx_channel;
+ u64 val, ring_len;
+
+ err = niu_tx_channel_stop(np, channel);
+ if (err)
+ return err;
+
+ err = niu_tx_channel_reset(np, channel);
+ if (err)
+ return err;
+
+ err = niu_tx_channel_lpage_init(np, channel);
+ if (err)
+ return err;
+
+ nw64(TXC_DMA_MAX(channel), rp->max_burst);
+ nw64(TX_ENT_MSK(channel), 0);
+
+ if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
+ TX_RNG_CFIG_STADDR)) {
+ dev_err(np->device, PFX "%s: TX ring channel %d "
+ "DMA addr (%llx) is not aligned.\n",
+ np->dev->name, channel,
+ (unsigned long long) rp->descr_dma);
+ return -EINVAL;
+ }
+
+ /* The length field in TX_RNG_CFIG is measured in 64-byte
+ * blocks. rp->pending counts TX descriptors, 8 bytes each,
+ * so eight descriptors fit in each block and dividing the
+ * descriptor count by 8 gives the value the chip wants.
+ */
+ ring_len = (rp->pending / 8);
+
+ val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
+ rp->descr_dma);
+ nw64(TX_RNG_CFIG(channel), val);
+
+ if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
+ ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
+ dev_err(np->device, PFX "%s: TX ring channel %d "
+ "MBOX addr (%llx) is has illegal bits.\n",
+ np->dev->name, channel,
+ (unsigned long long) rp->mbox_dma);
+ return -EINVAL;
+ }
+ nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
+ nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
+
+ nw64(TX_CS(channel), 0);
+
+ rp->last_pkt_cnt = 0;
+
+ return 0;
+}
+
+static void niu_init_rdc_groups(struct niu *np)
+{
+ struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
+ int i, first_table_num = tp->first_table_num;
+
+ for (i = 0; i < tp->num_tables; i++) {
+ struct rdc_table *tbl = &tp->tables[i];
+ int this_table = first_table_num + i;
+ int slot;
+
+ for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
+ nw64(RDC_TBL(this_table, slot),
+ tbl->rxdma_channel[slot]);
+ }
+
+ nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
+}
+
+static void niu_init_drr_weight(struct niu *np)
+{
+ int type = phy_decode(np->parent->port_phy, np->port);
+ u64 val;
+
+ switch (type) {
+ case PORT_TYPE_10G:
+ val = PT_DRR_WEIGHT_DEFAULT_10G;
+ break;
+
+ case PORT_TYPE_1G:
+ default:
+ val = PT_DRR_WEIGHT_DEFAULT_1G;
+ break;
+ }
+ nw64(PT_DRR_WT(np->port), val);
+}
+
+static int niu_init_hostinfo(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+ int i, err, num_alt = niu_num_alt_addr(np);
+ int first_rdc_table = tp->first_table_num;
+
+ err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+ if (err)
+ return err;
+
+ err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_alt; i++) {
+ err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int niu_rx_channel_reset(struct niu *np, int channel)
+{
+ return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
+ RXDMA_CFIG1_RST, 1000, 10,
+ "RXDMA_CFIG1");
+}
+
+static int niu_rx_channel_lpage_init(struct niu *np, int channel)
+{
+ u64 val;
+
+ nw64(RX_LOG_MASK1(channel), 0);
+ nw64(RX_LOG_VAL1(channel), 0);
+ nw64(RX_LOG_MASK2(channel), 0);
+ nw64(RX_LOG_VAL2(channel), 0);
+ nw64(RX_LOG_PAGE_RELO1(channel), 0);
+ nw64(RX_LOG_PAGE_RELO2(channel), 0);
+ nw64(RX_LOG_PAGE_HDL(channel), 0);
+
+ val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
+ val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
+ nw64(RX_LOG_PAGE_VLD(channel), val);
+
+ return 0;
+}
+
+static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
+{
+ u64 val;
+
+ val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
+ ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
+ ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
+ ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
+ nw64(RDC_RED_PARA(rp->rx_channel), val);
+}
+
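+/* Encode the RBR block size and the three per-ring buffer sizes into
+ * the RBR_CFIG_B register layout, rejecting unsupported sizes.
+ */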
+static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
+{
+ u64 val = 0;
+
+ *ret = 0;
+ switch (rp->rbr_block_size) {
+ case 4 * 1024:
+ val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ case 8 * 1024:
+ val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ case 16 * 1024:
+ val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ case 32 * 1024:
+ val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ val |= RBR_CFIG_B_VLD2;
+ switch (rp->rbr_sizes[2]) {
+ case 2 * 1024:
+ val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+ case 4 * 1024:
+ val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+ case 8 * 1024:
+ val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+ case 16 * 1024:
+ val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ val |= RBR_CFIG_B_VLD1;
+ switch (rp->rbr_sizes[1]) {
+ case 1 * 1024:
+ val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+ case 2 * 1024:
+ val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+ case 4 * 1024:
+ val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+ case 8 * 1024:
+ val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ val |= RBR_CFIG_B_VLD0;
+ switch (rp->rbr_sizes[0]) {
+ case 256:
+ val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+ case 512:
+ val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+ case 1 * 1024:
+ val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+ case 2 * 1024:
+ val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ret = val;
+ return 0;
+}
+
+static int niu_enable_rx_channel(struct niu *np, int channel, int on)
+{
+ u64 val = nr64(RXDMA_CFIG1(channel));
+ int limit;
+
+ if (on)
+ val |= RXDMA_CFIG1_EN;
+ else
+ val &= ~RXDMA_CFIG1_EN;
+ nw64(RXDMA_CFIG1(channel), val);
+
+ limit = 1000;
+ while (--limit > 0) {
+ if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
+ break;
+ udelay(10);
+ }
+ if (limit <= 0)
+ return -ENODEV;
+ return 0;
+}
+
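+/* Program one RX DMA channel: reset, logical pages, WRED parameters,
+ * mailbox, RBR and RCR configuration, then enable the channel and
+ * prime the RBR kick register.
+ */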
+static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+ int err, channel = rp->rx_channel;
+ u64 val;
+
+ err = niu_rx_channel_reset(np, channel);
+ if (err)
+ return err;
+
+ err = niu_rx_channel_lpage_init(np, channel);
+ if (err)
+ return err;
+
+ niu_rx_channel_wred_init(np, rp);
+
+ nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
+ nw64(RX_DMA_CTL_STAT(channel),
+ (RX_DMA_CTL_STAT_MEX |
+ RX_DMA_CTL_STAT_RCRTHRES |
+ RX_DMA_CTL_STAT_RCRTO |
+ RX_DMA_CTL_STAT_RBR_EMPTY));
+ nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
+ nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
+ nw64(RBR_CFIG_A(channel),
+ ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
+ (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
+ err = niu_compute_rbr_cfig_b(rp, &val);
+ if (err)
+ return err;
+ nw64(RBR_CFIG_B(channel), val);
+ nw64(RCRCFIG_A(channel),
+ ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
+ (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
+ nw64(RCRCFIG_B(channel),
+ ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
+ RCRCFIG_B_ENTOUT |
+ ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
+
+ err = niu_enable_rx_channel(np, channel, 1);
+ if (err)
+ return err;
+
+ nw64(RBR_KICK(channel), rp->rbr_index);
+
+ val = nr64(RX_DMA_CTL_STAT(channel));
+ val |= RX_DMA_CTL_STAT_RBR_EMPTY;
+ nw64(RX_DMA_CTL_STAT(channel), val);
+
+ return 0;
+}
+
+static int niu_init_rx_channels(struct niu *np)
+{
+ unsigned long flags;
+ u64 seed = jiffies_64;
+ int err, i;
+
+ niu_lock_parent(np, flags);
+ nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
+ nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
+ niu_unlock_parent(np, flags);
+
+ /* XXX RXDMA 32bit mode? XXX */
+
+ niu_init_rdc_groups(np);
+ niu_init_drr_weight(np);
+
+ err = niu_init_hostinfo(np);
+ if (err)
+ return err;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ err = niu_init_one_rx_channel(np, rp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
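+/* Install a TCAM rule matching no-port packets (such as IP fragments)
+ * so they are still steered to an RDC via the offset-zero entry.
+ */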
+static int niu_set_ip_frag_rule(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_classifier *cp = &np->clas;
+ struct niu_tcam_entry *tp;
+ int index, err;
+
+ index = cp->tcam_top;
+ tp = &parent->tcam[index];
+
+ /* Note that the noport bit is the same in both IPv4 and
+ * IPv6 format TCAM entries.
+ */
+ memset(tp, 0, sizeof(*tp));
+ tp->key[1] = TCAM_V4KEY1_NOPORT;
+ tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
+ tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+ ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
+ err = tcam_write(np, index, tp->key, tp->key_mask);
+ if (err)
+ return err;
+ err = tcam_assoc_write(np, index, tp->assoc_data);
+ if (err)
+ return err;
+ tp->valid = 1;
+ cp->tcam_valid_entries++;
return 0;
}
-static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
+static int niu_init_classifier_hw(struct niu *np)
{
- int mtu = np->dev->mtu;
+ struct niu_parent *parent = np->parent;
+ struct niu_classifier *cp = &np->clas;
+ int i, err;
- /* These values are recommended by the HW designers for fair
- * utilization of DRR amongst the rings.
- */
- rp->max_burst = mtu + 32;
- if (rp->max_burst > 4096)
- rp->max_burst = 4096;
-}
+ nw64(H1POLY, cp->h1_init);
+ nw64(H2POLY, cp->h2_init);
-static int niu_alloc_tx_ring_info(struct niu *np,
- struct tx_ring_info *rp)
-{
- BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
+ err = niu_init_hostinfo(np);
+ if (err)
+ return err;
- rp->mbox = np->ops->alloc_coherent(np->device,
- sizeof(struct txdma_mailbox),
- &rp->mbox_dma, GFP_KERNEL);
- if (!rp->mbox)
- return -ENOMEM;
- if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
- return -EINVAL;
+ for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
+ struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
+
+ vlan_tbl_write(np, i, np->port,
+ vp->vlan_pref, vp->rdc_num);
}
- rp->descr = np->ops->alloc_coherent(np->device,
- MAX_TX_RING_SIZE * sizeof(__le64),
- &rp->descr_dma, GFP_KERNEL);
- if (!rp->descr)
- return -ENOMEM;
- if ((unsigned long)rp->descr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA descr table %p\n", np->dev->name, rp->descr);
- return -EINVAL;
+ for (i = 0; i < cp->num_alt_mac_mappings; i++) {
+ struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
+
+ err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
+ ap->rdc_num, ap->mac_pref);
+ if (err)
+ return err;
}
- rp->pending = MAX_TX_RING_SIZE;
- rp->prod = 0;
- rp->cons = 0;
- rp->wrap_bit = 0;
+ for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
+ int index = i - CLASS_CODE_USER_PROG1;
- /* XXX make these configurable... XXX */
- rp->mark_freq = rp->pending / 4;
+ err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
+ if (err)
+ return err;
+ err = niu_set_flow_key(np, i, parent->flow_key[index]);
+ if (err)
+ return err;
+ }
- niu_set_max_burst(np, rp);
+ err = niu_set_ip_frag_rule(np);
+ if (err)
+ return err;
+
+ tcam_enable(np, 1);
return 0;
}
-static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
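+/* The ZCP CFIFO RAM is accessed indirectly: load the five data words,
+ * trigger the access through ZCP_RAM_ACC, and wait for the BUSY bit
+ * to clear.
+ */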
+static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
- u16 bss;
-
- bss = min(PAGE_SHIFT, 15);
-
- rp->rbr_block_size = 1 << bss;
- rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
-
- rp->rbr_sizes[0] = 256;
- rp->rbr_sizes[1] = 1024;
- if (np->dev->mtu > ETH_DATA_LEN) {
- switch (PAGE_SIZE) {
- case 4 * 1024:
- rp->rbr_sizes[2] = 4096;
- break;
+ nw64(ZCP_RAM_DATA0, data[0]);
+ nw64(ZCP_RAM_DATA1, data[1]);
+ nw64(ZCP_RAM_DATA2, data[2]);
+ nw64(ZCP_RAM_DATA3, data[3]);
+ nw64(ZCP_RAM_DATA4, data[4]);
+ nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
+ nw64(ZCP_RAM_ACC,
+ (ZCP_RAM_ACC_WRITE |
+ (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
+ (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
- default:
- rp->rbr_sizes[2] = 8192;
- break;
- }
- } else {
- rp->rbr_sizes[2] = 2048;
- }
- rp->rbr_sizes[3] = rp->rbr_block_size;
+ return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+ 1000, 100);
}
-static int niu_alloc_channels(struct niu *np)
+static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
- struct niu_parent *parent = np->parent;
- int first_rx_channel, first_tx_channel;
- int i, port, err;
+ int err;
- port = np->port;
- first_rx_channel = first_tx_channel = 0;
- for (i = 0; i < port; i++) {
- first_rx_channel += parent->rxchan_per_port[i];
- first_tx_channel += parent->txchan_per_port[i];
+ err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+ 1000, 100);
+ if (err) {
+ dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
+ "ZCP_RAM_ACC[%llx]\n", np->dev->name,
+ (unsigned long long) nr64(ZCP_RAM_ACC));
+ return err;
}
- np->num_rx_rings = parent->rxchan_per_port[port];
- np->num_tx_rings = parent->txchan_per_port[port];
-
- np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
- GFP_KERNEL);
- err = -ENOMEM;
- if (!np->rx_rings)
- goto out_err;
+ nw64(ZCP_RAM_ACC,
+ (ZCP_RAM_ACC_READ |
+ (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
+ (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+ 1000, 100);
+ if (err) {
+ dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
+ "ZCP_RAM_ACC[%llx]\n", np->dev->name,
+ (unsigned long long) nr64(ZCP_RAM_ACC));
+ return err;
+ }
- rp->np = np;
- rp->rx_channel = first_rx_channel + i;
+ data[0] = nr64(ZCP_RAM_DATA0);
+ data[1] = nr64(ZCP_RAM_DATA1);
+ data[2] = nr64(ZCP_RAM_DATA2);
+ data[3] = nr64(ZCP_RAM_DATA3);
+ data[4] = nr64(ZCP_RAM_DATA4);
- err = niu_alloc_rx_ring_info(np, rp);
- if (err)
- goto out_err;
+ return 0;
+}
- niu_size_rbr(np, rp);
+static void niu_zcp_cfifo_reset(struct niu *np)
+{
+ u64 val = nr64(RESET_CFIFO);
- /* XXX better defaults, configurable, etc... XXX */
- rp->nonsyn_window = 64;
- rp->nonsyn_threshold = rp->rcr_table_size - 64;
- rp->syn_window = 64;
- rp->syn_threshold = rp->rcr_table_size - 64;
- rp->rcr_pkt_threshold = 16;
- rp->rcr_timeout = 8;
- rp->rbr_kick_thresh = RBR_REFILL_MIN;
- if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
- rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
+ val |= RESET_CFIFO_RST(np->port);
+ nw64(RESET_CFIFO, val);
+ udelay(10);
- err = niu_rbr_fill(np, rp, GFP_KERNEL);
- if (err)
- return err;
- }
+ val &= ~RESET_CFIFO_RST(np->port);
+ nw64(RESET_CFIFO, val);
+}
- np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
- GFP_KERNEL);
- err = -ENOMEM;
- if (!np->tx_rings)
- goto out_err;
+static int niu_init_zcp(struct niu *np)
+{
+ u64 data[5], rbuf[5];
+ int i, max, err;
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ if (np->port == 0 || np->port == 1)
+ max = ATLAS_P0_P1_CFIFO_ENTRIES;
+ else
+ max = ATLAS_P2_P3_CFIFO_ENTRIES;
+ } else
+ max = NIU_CFIFO_ENTRIES;
- rp->np = np;
- rp->tx_channel = first_tx_channel + i;
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
- err = niu_alloc_tx_ring_info(np, rp);
+ for (i = 0; i < max; i++) {
+ err = niu_zcp_write(np, i, data);
if (err)
- goto out_err;
+ return err;
+ err = niu_zcp_read(np, i, rbuf);
+ if (err)
+ return err;
}
- return 0;
+ niu_zcp_cfifo_reset(np);
+ nw64(CFIFO_ECC(np->port), 0);
+ nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
+ (void) nr64(ZCP_INT_STAT);
+ nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
-out_err:
- niu_free_channels(np);
- return err;
+ return 0;
}
-static int niu_tx_cs_sng_poll(struct niu *np, int channel)
+static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
- int limit = 1000;
+ u64 val = nr64_ipp(IPP_CFIG);
- while (--limit > 0) {
- u64 val = nr64(TX_CS(channel));
- if (val & TX_CS_SNG_STATE)
- return 0;
- }
- return -ENODEV;
+ nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
+ nw64_ipp(IPP_DFIFO_WR_PTR, index);
+ nw64_ipp(IPP_DFIFO_WR0, data[0]);
+ nw64_ipp(IPP_DFIFO_WR1, data[1]);
+ nw64_ipp(IPP_DFIFO_WR2, data[2]);
+ nw64_ipp(IPP_DFIFO_WR3, data[3]);
+ nw64_ipp(IPP_DFIFO_WR4, data[4]);
+ nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}
-static int niu_tx_channel_stop(struct niu *np, int channel)
+static void niu_ipp_read(struct niu *np, int index, u64 *data)
{
- u64 val = nr64(TX_CS(channel));
-
- val |= TX_CS_STOP_N_GO;
- nw64(TX_CS(channel), val);
-
- return niu_tx_cs_sng_poll(np, channel);
+ nw64_ipp(IPP_DFIFO_RD_PTR, index);
+ data[0] = nr64_ipp(IPP_DFIFO_RD0);
+ data[1] = nr64_ipp(IPP_DFIFO_RD1);
+ data[2] = nr64_ipp(IPP_DFIFO_RD2);
+ data[3] = nr64_ipp(IPP_DFIFO_RD3);
+ data[4] = nr64_ipp(IPP_DFIFO_RD4);
}
-static int niu_tx_cs_reset_poll(struct niu *np, int channel)
+static int niu_ipp_reset(struct niu *np)
{
- int limit = 1000;
-
- while (--limit > 0) {
- u64 val = nr64(TX_CS(channel));
- if (!(val & TX_CS_RST))
- return 0;
- }
- return -ENODEV;
+ return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
+ 1000, 100, "IPP_CFIG");
}
-static int niu_tx_channel_reset(struct niu *np, int channel)
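+/* Clear the IPP DFIFO by writing and reading back every entry, reset
+ * the block and its latched status, then enable IPP with ECC,
+ * checksumming and the maximum packet size configured.
+ */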
+static int niu_init_ipp(struct niu *np)
{
- u64 val = nr64(TX_CS(channel));
- int err;
+ u64 data[5], rbuf[5], val;
+ int i, max, err;
- val |= TX_CS_RST;
- nw64(TX_CS(channel), val);
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ if (np->port == 0 || np->port == 1)
+ max = ATLAS_P0_P1_DFIFO_ENTRIES;
+ else
+ max = ATLAS_P2_P3_DFIFO_ENTRIES;
+ } else
+ max = NIU_DFIFO_ENTRIES;
- err = niu_tx_cs_reset_poll(np, channel);
- if (!err)
- nw64(TX_RING_KICK(channel), 0);
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
- return err;
-}
+ for (i = 0; i < max; i++) {
+ niu_ipp_write(np, i, data);
+ niu_ipp_read(np, i, rbuf);
+ }
-static int niu_tx_channel_lpage_init(struct niu *np, int channel)
-{
- u64 val;
+ (void) nr64_ipp(IPP_INT_STAT);
+ (void) nr64_ipp(IPP_INT_STAT);
- nw64(TX_LOG_MASK1(channel), 0);
- nw64(TX_LOG_VAL1(channel), 0);
- nw64(TX_LOG_MASK2(channel), 0);
- nw64(TX_LOG_VAL2(channel), 0);
- nw64(TX_LOG_PAGE_RELO1(channel), 0);
- nw64(TX_LOG_PAGE_RELO2(channel), 0);
- nw64(TX_LOG_PAGE_HDL(channel), 0);
+ err = niu_ipp_reset(np);
+ if (err)
+ return err;
- val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
- val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
- nw64(TX_LOG_PAGE_VLD(channel), val);
+ (void) nr64_ipp(IPP_PKT_DIS);
+ (void) nr64_ipp(IPP_BAD_CS_CNT);
+ (void) nr64_ipp(IPP_ECC);
- /* XXX TXDMA 32bit mode? XXX */
+ (void) nr64_ipp(IPP_INT_STAT);
+
+ nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
+
+ val = nr64_ipp(IPP_CFIG);
+ val &= ~IPP_CFIG_IP_MAX_PKT;
+ val |= (IPP_CFIG_IPP_ENABLE |
+ IPP_CFIG_DFIFO_ECC_EN |
+ IPP_CFIG_DROP_BAD_CRC |
+ IPP_CFIG_CKSUM_EN |
+ (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
+ nw64_ipp(IPP_CFIG, val);
return 0;
}
-static void niu_txc_enable_port(struct niu *np, int on)
+static void niu_handle_led(struct niu *np, int status)
{
- unsigned long flags;
- u64 val, mask;
+ u64 val;
+ val = nr64_mac(XMAC_CONFIG);
- niu_lock_parent(np, flags);
- val = nr64(TXC_CONTROL);
- mask = (u64)1 << np->port;
- if (on) {
- val |= TXC_CONTROL_ENABLE | mask;
- } else {
- val &= ~mask;
- if ((val & ~TXC_CONTROL_ENABLE) == 0)
- val &= ~TXC_CONTROL_ENABLE;
+ if ((np->flags & NIU_FLAGS_10G) != 0 &&
+ (np->flags & NIU_FLAGS_FIBER) != 0) {
+ if (status) {
+ val |= XMAC_CONFIG_LED_POLARITY;
+ val &= ~XMAC_CONFIG_FORCE_LED_ON;
+ } else {
+ val |= XMAC_CONFIG_FORCE_LED_ON;
+ val &= ~XMAC_CONFIG_LED_POLARITY;
+ }
}
- nw64(TXC_CONTROL, val);
- niu_unlock_parent(np, flags);
+
+ nw64_mac(XMAC_CONFIG, val);
}
-static void niu_txc_set_imask(struct niu *np, u64 imask)
+static void niu_init_xif_xmac(struct niu *np)
{
- unsigned long flags;
+ struct niu_link_config *lp = &np->link_config;
u64 val;
- niu_lock_parent(np, flags);
- val = nr64(TXC_INT_MASK);
- val &= ~TXC_INT_MASK_VAL(np->port);
- val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
- niu_unlock_parent(np, flags);
-}
+ if (np->flags & NIU_FLAGS_XCVR_SERDES) {
+ val = nr64(MIF_CONFIG);
+ val |= MIF_CONFIG_ATCA_GE;
+ nw64(MIF_CONFIG, val);
+ }
-static void niu_txc_port_dma_enable(struct niu *np, int on)
-{
- u64 val = 0;
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
- if (on) {
- int i;
+ val |= XMAC_CONFIG_TX_OUTPUT_EN;
- for (i = 0; i < np->num_tx_rings; i++)
- val |= (1 << np->tx_rings[i].tx_channel);
+ if (lp->loopback_mode == LOOPBACK_MAC) {
+ val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
+ val |= XMAC_CONFIG_LOOPBACK;
+ } else {
+ val &= ~XMAC_CONFIG_LOOPBACK;
}
- nw64(TXC_PORT_DMA(np->port), val);
-}
-
-static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
-{
- int err, channel = rp->tx_channel;
- u64 val, ring_len;
- err = niu_tx_channel_stop(np, channel);
- if (err)
- return err;
+ if (np->flags & NIU_FLAGS_10G) {
+ val &= ~XMAC_CONFIG_LFS_DISABLE;
+ } else {
+ val |= XMAC_CONFIG_LFS_DISABLE;
+ if (!(np->flags & NIU_FLAGS_FIBER) &&
+ !(np->flags & NIU_FLAGS_XCVR_SERDES))
+ val |= XMAC_CONFIG_1G_PCS_BYPASS;
+ else
+ val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
+ }
- err = niu_tx_channel_reset(np, channel);
- if (err)
- return err;
+ val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
- err = niu_tx_channel_lpage_init(np, channel);
- if (err)
- return err;
+ if (lp->active_speed == SPEED_100)
+ val |= XMAC_CONFIG_SEL_CLK_25MHZ;
+ else
+ val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
- nw64(TXC_DMA_MAX(channel), rp->max_burst);
- nw64(TX_ENT_MSK(channel), 0);
+ nw64_mac(XMAC_CONFIG, val);
- if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
- TX_RNG_CFIG_STADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "DMA addr (%llx) is not aligned.\n",
- np->dev->name, channel,
- (unsigned long long) rp->descr_dma);
- return -EINVAL;
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_MODE_MASK;
+ if (np->flags & NIU_FLAGS_10G) {
+ val |= XMAC_CONFIG_MODE_XGMII;
+ } else {
+ if (lp->active_speed == SPEED_1000)
+ val |= XMAC_CONFIG_MODE_GMII;
+ else
+ val |= XMAC_CONFIG_MODE_MII;
}
- /* The length field in TX_RNG_CFIG is measured in 64-byte
- * blocks. rp->pending is the number of TX descriptors in
- * our ring, 8 bytes each, thus we divide by 8 bytes more
- * to get the proper value the chip wants.
- */
- ring_len = (rp->pending / 8);
-
- val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
- rp->descr_dma);
- nw64(TX_RNG_CFIG(channel), val);
-
- if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
- ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "MBOX addr (%llx) is has illegal bits.\n",
- np->dev->name, channel,
- (unsigned long long) rp->mbox_dma);
- return -EINVAL;
- }
- nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
- nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
+ nw64_mac(XMAC_CONFIG, val);
+}
- nw64(TX_CS(channel), 0);
+static void niu_init_xif_bmac(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u64 val;
- rp->last_pkt_cnt = 0;
+ val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
- return 0;
-}
+ if (lp->loopback_mode == LOOPBACK_MAC)
+ val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
+ else
+ val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
-static void niu_init_rdc_groups(struct niu *np)
-{
- struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
- int i, first_table_num = tp->first_table_num;
+ if (lp->active_speed == SPEED_1000)
+ val |= BMAC_XIF_CONFIG_GMII_MODE;
+ else
+ val &= ~BMAC_XIF_CONFIG_GMII_MODE;
- for (i = 0; i < tp->num_tables; i++) {
- struct rdc_table *tbl = &tp->tables[i];
- int this_table = first_table_num + i;
- int slot;
+ val &= ~(BMAC_XIF_CONFIG_LINK_LED |
+ BMAC_XIF_CONFIG_LED_POLARITY);
- for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
- nw64(RDC_TBL(this_table, slot),
- tbl->rxdma_channel[slot]);
- }
+ if (!(np->flags & NIU_FLAGS_10G) &&
+ !(np->flags & NIU_FLAGS_FIBER) &&
+ lp->active_speed == SPEED_100)
+ val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
+ else
+ val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
- nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
+ nw64_mac(BMAC_XIF_CONFIG, val);
}
-static void niu_init_drr_weight(struct niu *np)
+static void niu_init_xif(struct niu *np)
{
- int type = phy_decode(np->parent->port_phy, np->port);
- u64 val;
-
- switch (type) {
- case PORT_TYPE_10G:
- val = PT_DRR_WEIGHT_DEFAULT_10G;
- break;
-
- case PORT_TYPE_1G:
- default:
- val = PT_DRR_WEIGHT_DEFAULT_1G;
- break;
- }
- nw64(PT_DRR_WT(np->port), val);
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_init_xif_xmac(np);
+ else
+ niu_init_xif_bmac(np);
}
-static int niu_init_hostinfo(struct niu *np)
+static void niu_pcs_mii_reset(struct niu *np)
{
- struct niu_parent *parent = np->parent;
- struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
- int i, err, num_alt = niu_num_alt_addr(np);
- int first_rdc_table = tp->first_table_num;
-
- err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
- if (err)
- return err;
-
- err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
- if (err)
- return err;
-
- for (i = 0; i < num_alt; i++) {
- err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
- if (err)
- return err;
+ int limit = 1000;
+ u64 val = nr64_pcs(PCS_MII_CTL);
+ val |= PCS_MII_CTL_RST;
+ nw64_pcs(PCS_MII_CTL, val);
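+ /* The reset bit self-clears; poll it for up to ~100ms. */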
+ while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
+ udelay(100);
+ val = nr64_pcs(PCS_MII_CTL);
}
-
- return 0;
}
-static int niu_rx_channel_reset(struct niu *np, int channel)
+static void niu_xpcs_reset(struct niu *np)
{
- return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
- RXDMA_CFIG1_RST, 1000, 10,
- "RXDMA_CFIG1");
+ int limit = 1000;
+ u64 val = nr64_xpcs(XPCS_CONTROL1);
+ val |= XPCS_CONTROL1_RESET;
+ nw64_xpcs(XPCS_CONTROL1, val);
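+ /* Likewise self-clearing; poll for up to ~100ms. */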
+ while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
+ udelay(100);
+ val = nr64_xpcs(XPCS_CONTROL1);
+ }
}
-static int niu_rx_channel_lpage_init(struct niu *np, int channel)
+static int niu_init_pcs(struct niu *np)
{
+ struct niu_link_config *lp = &np->link_config;
u64 val;
- nw64(RX_LOG_MASK1(channel), 0);
- nw64(RX_LOG_VAL1(channel), 0);
- nw64(RX_LOG_MASK2(channel), 0);
- nw64(RX_LOG_VAL2(channel), 0);
- nw64(RX_LOG_PAGE_RELO1(channel), 0);
- nw64(RX_LOG_PAGE_RELO2(channel), 0);
- nw64(RX_LOG_PAGE_HDL(channel), 0);
-
- val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
- val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
- nw64(RX_LOG_PAGE_VLD(channel), val);
+ switch (np->flags & (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case NIU_FLAGS_FIBER:
+ /* 1G fiber */
+ nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
+ nw64_pcs(PCS_DPATH_MODE, 0);
+ niu_pcs_mii_reset(np);
+ break;
- return 0;
-}
+ case NIU_FLAGS_10G:
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ /* 10G SERDES */
+ if (!(np->flags & NIU_FLAGS_XMAC))
+ return -EINVAL;
-static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
-{
- u64 val;
+ /* 10G copper or fiber */
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
+ nw64_mac(XMAC_CONFIG, val);
- val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
- ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
- ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
- ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
- nw64(RDC_RED_PARA(rp->rx_channel), val);
-}
+ niu_xpcs_reset(np);
-static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
-{
- u64 val = 0;
+ val = nr64_xpcs(XPCS_CONTROL1);
+ if (lp->loopback_mode == LOOPBACK_PHY)
+ val |= XPCS_CONTROL1_LOOPBACK;
+ else
+ val &= ~XPCS_CONTROL1_LOOPBACK;
+ nw64_xpcs(XPCS_CONTROL1, val);
- switch (rp->rbr_block_size) {
- case 4 * 1024:
- val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
- break;
- case 8 * 1024:
- val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
- break;
- case 16 * 1024:
- val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
- break;
- case 32 * 1024:
- val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
- break;
- default:
- return -EINVAL;
- }
- val |= RBR_CFIG_B_VLD2;
- switch (rp->rbr_sizes[2]) {
- case 2 * 1024:
- val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
- break;
- case 4 * 1024:
- val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
- break;
- case 8 * 1024:
- val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
- break;
- case 16 * 1024:
- val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
+ (void) nr64_xpcs(XPCS_SYMERR_CNT01);
+ (void) nr64_xpcs(XPCS_SYMERR_CNT23);
break;
- default:
- return -EINVAL;
- }
- val |= RBR_CFIG_B_VLD1;
- switch (rp->rbr_sizes[1]) {
- case 1 * 1024:
- val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
- break;
- case 2 * 1024:
- val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
- break;
- case 4 * 1024:
- val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
- break;
- case 8 * 1024:
- val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
- break;
- default:
- return -EINVAL;
- }
- val |= RBR_CFIG_B_VLD0;
- switch (rp->rbr_sizes[0]) {
- case 256:
- val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
- break;
- case 512:
- val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
- break;
- case 1 * 1024:
- val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
+ case NIU_FLAGS_XCVR_SERDES:
+ /* 1G SERDES */
+ niu_pcs_mii_reset(np);
+ nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
+ nw64_pcs(PCS_DPATH_MODE, 0);
break;
- case 2 * 1024:
- val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
+
+ case 0:
+ /* 1G copper */
+ case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
+ /* 1G RGMII FIBER */
+ nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
+ niu_pcs_mii_reset(np);
break;
default:
return -EINVAL;
}
- *ret = val;
return 0;
}
-static int niu_enable_rx_channel(struct niu *np, int channel, int on)
+static int niu_reset_tx_xmac(struct niu *np)
{
- u64 val = nr64(RXDMA_CFIG1(channel));
- int limit;
+ return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
+ (XTXMAC_SW_RST_REG_RS |
+ XTXMAC_SW_RST_SOFT_RST),
+ 1000, 100, "XTXMAC_SW_RST");
+}
- if (on)
- val |= RXDMA_CFIG1_EN;
- else
- val &= ~RXDMA_CFIG1_EN;
- nw64(RXDMA_CFIG1(channel), val);
+static int niu_reset_tx_bmac(struct niu *np)
+{
+ int limit;
+ nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
limit = 1000;
- while (--limit > 0) {
- if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
+ while (--limit >= 0) {
+ if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
break;
- udelay(10);
+ udelay(100);
}
- if (limit <= 0)
+ if (limit < 0) {
+ dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
+ "BTXMAC_SW_RST[%llx]\n",
+ np->port,
+ (unsigned long long) nr64_mac(BTXMAC_SW_RST));
return -ENODEV;
+ }
+
return 0;
}
-static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+static int niu_reset_tx_mac(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ return niu_reset_tx_xmac(np);
+ else
+ return niu_reset_tx_bmac(np);
+}
+
+static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
+{
+ u64 val;
+
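+ /* XMAC_MIN holds both the RX and TX minimum frame sizes;
+ * XMAC_MAX holds the maximum frame size.
+ */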
+ val = nr64_mac(XMAC_MIN);
+ val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
+ XMAC_MIN_RX_MIN_PKT_SIZE);
+ val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
+ val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
+ nw64_mac(XMAC_MIN, val);
+
+ nw64_mac(XMAC_MAX, max);
+
+ nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
+
+ val = nr64_mac(XMAC_IPG);
+ if (np->flags & NIU_FLAGS_10G) {
+ val &= ~XMAC_IPG_IPG_XGMII;
+ val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
+ } else {
+ val &= ~XMAC_IPG_IPG_MII_GMII;
+ val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
+ }
+ nw64_mac(XMAC_IPG, val);
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
+ XMAC_CONFIG_STRETCH_MODE |
+ XMAC_CONFIG_VAR_MIN_IPG_EN |
+ XMAC_CONFIG_TX_ENABLE);
+ nw64_mac(XMAC_CONFIG, val);
+
+ nw64_mac(TXMAC_FRM_CNT, 0);
+ nw64_mac(TXMAC_BYTE_CNT, 0);
+}
+
+static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
- int err, channel = rp->rx_channel;
u64 val;
- err = niu_rx_channel_reset(np, channel);
- if (err)
- return err;
-
- err = niu_rx_channel_lpage_init(np, channel);
- if (err)
- return err;
-
- niu_rx_channel_wred_init(np, rp);
-
- nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
- nw64(RX_DMA_CTL_STAT(channel),
- (RX_DMA_CTL_STAT_MEX |
- RX_DMA_CTL_STAT_RCRTHRES |
- RX_DMA_CTL_STAT_RCRTO |
- RX_DMA_CTL_STAT_RBR_EMPTY));
- nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
- nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
- nw64(RBR_CFIG_A(channel),
- ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
- (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
- err = niu_compute_rbr_cfig_b(rp, &val);
- if (err)
- return err;
- nw64(RBR_CFIG_B(channel), val);
- nw64(RCRCFIG_A(channel),
- ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
- (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
- nw64(RCRCFIG_B(channel),
- ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
- RCRCFIG_B_ENTOUT |
- ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
-
- err = niu_enable_rx_channel(np, channel, 1);
- if (err)
- return err;
-
- nw64(RBR_KICK(channel), rp->rbr_index);
+ nw64_mac(BMAC_MIN_FRAME, min);
+ nw64_mac(BMAC_MAX_FRAME, max);
- val = nr64(RX_DMA_CTL_STAT(channel));
- val |= RX_DMA_CTL_STAT_RBR_EMPTY;
- nw64(RX_DMA_CTL_STAT(channel), val);
+ nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
+ nw64_mac(BMAC_CTRL_TYPE, 0x8808);
+ nw64_mac(BMAC_PREAMBLE_SIZE, 7);
- return 0;
+ val = nr64_mac(BTXMAC_CONFIG);
+ val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
+ BTXMAC_CONFIG_ENABLE);
+ nw64_mac(BTXMAC_CONFIG, val);
}
-static int niu_init_rx_channels(struct niu *np)
+static void niu_init_tx_mac(struct niu *np)
{
- unsigned long flags;
- u64 seed = jiffies_64;
- int err, i;
-
- niu_lock_parent(np, flags);
- nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
- nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
- niu_unlock_parent(np, flags);
+ u64 min, max;
- /* XXX RXDMA 32bit mode? XXX */
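+ /* 64 is the minimum ethernet frame; the maximum depends on
+ * whether a jumbo MTU is configured (9216 vs. 1522 with VLAN).
+ */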
+ min = 64;
+ if (np->dev->mtu > ETH_DATA_LEN)
+ max = 9216;
+ else
+ max = 1522;
- niu_init_rdc_groups(np);
- niu_init_drr_weight(np);
+ /* The XMAC_MIN register only accepts values for TX min which
+ * have the low 3 bits cleared.
+ */
+ BUG_ON(min & 0x7);
- err = niu_init_hostinfo(np);
- if (err)
- return err;
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_init_tx_xmac(np, min, max);
+ else
+ niu_init_tx_bmac(np, min, max);
+}
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+static int niu_reset_rx_xmac(struct niu *np)
+{
+ int limit;
- err = niu_init_one_rx_channel(np, rp);
- if (err)
- return err;
+ nw64_mac(XRXMAC_SW_RST,
+ XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
+ limit = 1000;
+ while (--limit >= 0) {
+ if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
+ XRXMAC_SW_RST_SOFT_RST)))
+ break;
+ udelay(100);
+ }
+ if (limit < 0) {
+ dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
+ "XRXMAC_SW_RST[%llx]\n",
+ np->port,
+ (unsigned long long) nr64_mac(XRXMAC_SW_RST));
+ return -ENODEV;
}
return 0;
}
-static int niu_set_ip_frag_rule(struct niu *np)
+static int niu_reset_rx_bmac(struct niu *np)
{
- struct niu_parent *parent = np->parent;
- struct niu_classifier *cp = &np->clas;
- struct niu_tcam_entry *tp;
- int index, err;
-
- /* XXX fix this allocation scheme XXX */
- index = cp->tcam_index;
- tp = &parent->tcam[index];
+ int limit;
- /* Note that the noport bit is the same in both ipv4 and
- * ipv6 format TCAM entries.
- */
- memset(tp, 0, sizeof(*tp));
- tp->key[1] = TCAM_V4KEY1_NOPORT;
- tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
- tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
- ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
- err = tcam_write(np, index, tp->key, tp->key_mask);
- if (err)
- return err;
- err = tcam_assoc_write(np, index, tp->assoc_data);
- if (err)
- return err;
+ nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
+ limit = 1000;
+ while (--limit >= 0) {
+ if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
+ break;
+ udelay(100);
+ }
+ if (limit < 0) {
+ dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
+ "BRXMAC_SW_RST[%llx]\n",
+ np->port,
+ (unsigned long long) nr64_mac(BRXMAC_SW_RST));
+ return -ENODEV;
+ }
return 0;
}
-static int niu_init_classifier_hw(struct niu *np)
+static int niu_reset_rx_mac(struct niu *np)
{
- struct niu_parent *parent = np->parent;
- struct niu_classifier *cp = &np->clas;
- int i, err;
-
- nw64(H1POLY, cp->h1_init);
- nw64(H2POLY, cp->h2_init);
-
- err = niu_init_hostinfo(np);
- if (err)
- return err;
-
- for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
- struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
+ if (np->flags & NIU_FLAGS_XMAC)
+ return niu_reset_rx_xmac(np);
+ else
+ return niu_reset_rx_bmac(np);
+}
- vlan_tbl_write(np, i, np->port,
- vp->vlan_pref, vp->rdc_num);
- }
+static void niu_init_rx_xmac(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+ int first_rdc_table = tp->first_table_num;
+ unsigned long i;
+ u64 val;
- for (i = 0; i < cp->num_alt_mac_mappings; i++) {
- struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
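+ /* Clear the address-match filters and the multicast hash table,
+ * then point the primary and multicast RDC tables at this port's
+ * first RDC group.
+ */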
+ nw64_mac(XMAC_ADD_FILT0, 0);
+ nw64_mac(XMAC_ADD_FILT1, 0);
+ nw64_mac(XMAC_ADD_FILT2, 0);
+ nw64_mac(XMAC_ADD_FILT12_MASK, 0);
+ nw64_mac(XMAC_ADD_FILT00_MASK, 0);
+ for (i = 0; i < MAC_NUM_HASH; i++)
+ nw64_mac(XMAC_HASH_TBL(i), 0);
+ nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
+ niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+ niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
- err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
- ap->rdc_num, ap->mac_pref);
- if (err)
- return err;
- }
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
+ XMAC_CONFIG_PROMISCUOUS |
+ XMAC_CONFIG_PROMISC_GROUP |
+ XMAC_CONFIG_ERR_CHK_DIS |
+ XMAC_CONFIG_RX_CRC_CHK_DIS |
+ XMAC_CONFIG_RESERVED_MULTICAST |
+ XMAC_CONFIG_RX_CODEV_CHK_DIS |
+ XMAC_CONFIG_ADDR_FILTER_EN |
+ XMAC_CONFIG_RCV_PAUSE_ENABLE |
+ XMAC_CONFIG_STRIP_CRC |
+ XMAC_CONFIG_PASS_FLOW_CTRL |
+ XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
+ val |= (XMAC_CONFIG_HASH_FILTER_EN);
+ nw64_mac(XMAC_CONFIG, val);
- for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
- int index = i - CLASS_CODE_USER_PROG1;
+ nw64_mac(RXMAC_BT_CNT, 0);
+ nw64_mac(RXMAC_BC_FRM_CNT, 0);
+ nw64_mac(RXMAC_MC_FRM_CNT, 0);
+ nw64_mac(RXMAC_FRAG_CNT, 0);
+ nw64_mac(RXMAC_HIST_CNT1, 0);
+ nw64_mac(RXMAC_HIST_CNT2, 0);
+ nw64_mac(RXMAC_HIST_CNT3, 0);
+ nw64_mac(RXMAC_HIST_CNT4, 0);
+ nw64_mac(RXMAC_HIST_CNT5, 0);
+ nw64_mac(RXMAC_HIST_CNT6, 0);
+ nw64_mac(RXMAC_HIST_CNT7, 0);
+ nw64_mac(RXMAC_MPSZER_CNT, 0);
+ nw64_mac(RXMAC_CRC_ER_CNT, 0);
+ nw64_mac(RXMAC_CD_VIO_CNT, 0);
+ nw64_mac(LINK_FAULT_CNT, 0);
+}
- err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
- if (err)
- return err;
- err = niu_set_flow_key(np, i, parent->flow_key[index]);
- if (err)
- return err;
- }
+static void niu_init_rx_bmac(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+ int first_rdc_table = tp->first_table_num;
+ unsigned long i;
+ u64 val;
- err = niu_set_ip_frag_rule(np);
- if (err)
- return err;
+ nw64_mac(BMAC_ADD_FILT0, 0);
+ nw64_mac(BMAC_ADD_FILT1, 0);
+ nw64_mac(BMAC_ADD_FILT2, 0);
+ nw64_mac(BMAC_ADD_FILT12_MASK, 0);
+ nw64_mac(BMAC_ADD_FILT00_MASK, 0);
+ for (i = 0; i < MAC_NUM_HASH; i++)
+ nw64_mac(BMAC_HASH_TBL(i), 0);
+ niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+ niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+ nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
- tcam_enable(np, 1);
+ val = nr64_mac(BRXMAC_CONFIG);
+ val &= ~(BRXMAC_CONFIG_ENABLE |
+ BRXMAC_CONFIG_STRIP_PAD |
+ BRXMAC_CONFIG_STRIP_FCS |
+ BRXMAC_CONFIG_PROMISC |
+ BRXMAC_CONFIG_PROMISC_GRP |
+ BRXMAC_CONFIG_ADDR_FILT_EN |
+ BRXMAC_CONFIG_DISCARD_DIS);
+ val |= (BRXMAC_CONFIG_HASH_FILT_EN);
+ nw64_mac(BRXMAC_CONFIG, val);
- return 0;
+ val = nr64_mac(BMAC_ADDR_CMPEN);
+ val |= BMAC_ADDR_CMPEN_EN0;
+ nw64_mac(BMAC_ADDR_CMPEN, val);
}
-static int niu_zcp_write(struct niu *np, int index, u64 *data)
+static void niu_init_rx_mac(struct niu *np)
{
- nw64(ZCP_RAM_DATA0, data[0]);
- nw64(ZCP_RAM_DATA1, data[1]);
- nw64(ZCP_RAM_DATA2, data[2]);
- nw64(ZCP_RAM_DATA3, data[3]);
- nw64(ZCP_RAM_DATA4, data[4]);
- nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
- nw64(ZCP_RAM_ACC,
- (ZCP_RAM_ACC_WRITE |
- (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
- (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
+ niu_set_primary_mac(np, np->dev->dev_addr);
- return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
- 1000, 100);
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_init_rx_xmac(np);
+ else
+ niu_init_rx_bmac(np);
}
-static int niu_zcp_read(struct niu *np, int index, u64 *data)
+static void niu_enable_tx_xmac(struct niu *np, int on)
{
- int err;
-
- err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
- 1000, 100);
- if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
- return err;
- }
-
- nw64(ZCP_RAM_ACC,
- (ZCP_RAM_ACC_READ |
- (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
- (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
-
- err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
- 1000, 100);
- if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
- return err;
- }
-
- data[0] = nr64(ZCP_RAM_DATA0);
- data[1] = nr64(ZCP_RAM_DATA1);
- data[2] = nr64(ZCP_RAM_DATA2);
- data[3] = nr64(ZCP_RAM_DATA3);
- data[4] = nr64(ZCP_RAM_DATA4);
+ u64 val = nr64_mac(XMAC_CONFIG);
- return 0;
+ if (on)
+ val |= XMAC_CONFIG_TX_ENABLE;
+ else
+ val &= ~XMAC_CONFIG_TX_ENABLE;
+ nw64_mac(XMAC_CONFIG, val);
}
-static void niu_zcp_cfifo_reset(struct niu *np)
+static void niu_enable_tx_bmac(struct niu *np, int on)
{
- u64 val = nr64(RESET_CFIFO);
-
- val |= RESET_CFIFO_RST(np->port);
- nw64(RESET_CFIFO, val);
- udelay(10);
+ u64 val = nr64_mac(BTXMAC_CONFIG);
- val &= ~RESET_CFIFO_RST(np->port);
- nw64(RESET_CFIFO, val);
+ if (on)
+ val |= BTXMAC_CONFIG_ENABLE;
+ else
+ val &= ~BTXMAC_CONFIG_ENABLE;
+ nw64_mac(BTXMAC_CONFIG, val);
}
-static int niu_init_zcp(struct niu *np)
+static void niu_enable_tx_mac(struct niu *np, int on)
{
- u64 data[5], rbuf[5];
- int i, max, err;
-
- if (np->parent->plat_type != PLAT_TYPE_NIU) {
- if (np->port == 0 || np->port == 1)
- max = ATLAS_P0_P1_CFIFO_ENTRIES;
- else
- max = ATLAS_P2_P3_CFIFO_ENTRIES;
- } else
- max = NIU_CFIFO_ENTRIES;
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_enable_tx_xmac(np, on);
+ else
+ niu_enable_tx_bmac(np, on);
+}
- data[0] = 0;
- data[1] = 0;
- data[2] = 0;
- data[3] = 0;
- data[4] = 0;
+static void niu_enable_rx_xmac(struct niu *np, int on)
+{
+ u64 val = nr64_mac(XMAC_CONFIG);
- for (i = 0; i < max; i++) {
- err = niu_zcp_write(np, i, data);
- if (err)
- return err;
- err = niu_zcp_read(np, i, rbuf);
- if (err)
- return err;
- }
+ val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
+ XMAC_CONFIG_PROMISCUOUS);
- niu_zcp_cfifo_reset(np);
- nw64(CFIFO_ECC(np->port), 0);
- nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
- (void) nr64(ZCP_INT_STAT);
- nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
+ if (np->flags & NIU_FLAGS_MCAST)
+ val |= XMAC_CONFIG_HASH_FILTER_EN;
+ if (np->flags & NIU_FLAGS_PROMISC)
+ val |= XMAC_CONFIG_PROMISCUOUS;
- return 0;
+ if (on)
+ val |= XMAC_CONFIG_RX_MAC_ENABLE;
+ else
+ val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
+ nw64_mac(XMAC_CONFIG, val);
}
-static void niu_ipp_write(struct niu *np, int index, u64 *data)
+static void niu_enable_rx_bmac(struct niu *np, int on)
{
- u64 val = nr64_ipp(IPP_CFIG);
+ u64 val = nr64_mac(BRXMAC_CONFIG);
- nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
- nw64_ipp(IPP_DFIFO_WR_PTR, index);
- nw64_ipp(IPP_DFIFO_WR0, data[0]);
- nw64_ipp(IPP_DFIFO_WR1, data[1]);
- nw64_ipp(IPP_DFIFO_WR2, data[2]);
- nw64_ipp(IPP_DFIFO_WR3, data[3]);
- nw64_ipp(IPP_DFIFO_WR4, data[4]);
- nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
-}
+ val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
+ BRXMAC_CONFIG_PROMISC);
-static void niu_ipp_read(struct niu *np, int index, u64 *data)
-{
- nw64_ipp(IPP_DFIFO_RD_PTR, index);
- data[0] = nr64_ipp(IPP_DFIFO_RD0);
- data[1] = nr64_ipp(IPP_DFIFO_RD1);
- data[2] = nr64_ipp(IPP_DFIFO_RD2);
- data[3] = nr64_ipp(IPP_DFIFO_RD3);
- data[4] = nr64_ipp(IPP_DFIFO_RD4);
+ if (np->flags & NIU_FLAGS_MCAST)
+ val |= BRXMAC_CONFIG_HASH_FILT_EN;
+ if (np->flags & NIU_FLAGS_PROMISC)
+ val |= BRXMAC_CONFIG_PROMISC;
+
+ if (on)
+ val |= BRXMAC_CONFIG_ENABLE;
+ else
+ val &= ~BRXMAC_CONFIG_ENABLE;
+ nw64_mac(BRXMAC_CONFIG, val);
}
-static int niu_ipp_reset(struct niu *np)
+static void niu_enable_rx_mac(struct niu *np, int on)
{
- return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
- 1000, 100, "IPP_CFIG");
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_enable_rx_xmac(np, on);
+ else
+ niu_enable_rx_bmac(np, on);
}
-static int niu_init_ipp(struct niu *np)
+static int niu_init_mac(struct niu *np)
{
- u64 data[5], rbuf[5], val;
- int i, max, err;
-
- if (np->parent->plat_type != PLAT_TYPE_NIU) {
- if (np->port == 0 || np->port == 1)
- max = ATLAS_P0_P1_DFIFO_ENTRIES;
- else
- max = ATLAS_P2_P3_DFIFO_ENTRIES;
- } else
- max = NIU_DFIFO_ENTRIES;
-
- data[0] = 0;
- data[1] = 0;
- data[2] = 0;
- data[3] = 0;
- data[4] = 0;
-
- for (i = 0; i < max; i++) {
- niu_ipp_write(np, i, data);
- niu_ipp_read(np, i, rbuf);
- }
-
- (void) nr64_ipp(IPP_INT_STAT);
- (void) nr64_ipp(IPP_INT_STAT);
+ int err;
- err = niu_ipp_reset(np);
+ niu_init_xif(np);
+ err = niu_init_pcs(np);
if (err)
return err;
- (void) nr64_ipp(IPP_PKT_DIS);
- (void) nr64_ipp(IPP_BAD_CS_CNT);
- (void) nr64_ipp(IPP_ECC);
-
- (void) nr64_ipp(IPP_INT_STAT);
+ err = niu_reset_tx_mac(np);
+ if (err)
+ return err;
+ niu_init_tx_mac(np);
+ err = niu_reset_rx_mac(np);
+ if (err)
+ return err;
+ niu_init_rx_mac(np);
- nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
+ /* This looks hokey but the RX MAC reset we just did will
+ * undo some of the state we set up in niu_init_tx_mac() so we
+ * have to call it again. In particular, the RX MAC reset will
+ * set the XMAC_MAX register back to its default value.
+ */
+ niu_init_tx_mac(np);
+ niu_enable_tx_mac(np, 1);
- val = nr64_ipp(IPP_CFIG);
- val &= ~IPP_CFIG_IP_MAX_PKT;
- val |= (IPP_CFIG_IPP_ENABLE |
- IPP_CFIG_DFIFO_ECC_EN |
- IPP_CFIG_DROP_BAD_CRC |
- IPP_CFIG_CKSUM_EN |
- (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
- nw64_ipp(IPP_CFIG, val);
+ niu_enable_rx_mac(np, 1);
return 0;
}
-static void niu_handle_led(struct niu *np, int status)
-{
- u64 val;
- val = nr64_mac(XMAC_CONFIG);
-
- if ((np->flags & NIU_FLAGS_10G) != 0 &&
- (np->flags & NIU_FLAGS_FIBER) != 0) {
- if (status) {
- val |= XMAC_CONFIG_LED_POLARITY;
- val &= ~XMAC_CONFIG_FORCE_LED_ON;
- } else {
- val |= XMAC_CONFIG_FORCE_LED_ON;
- val &= ~XMAC_CONFIG_LED_POLARITY;
- }
- }
-
- nw64_mac(XMAC_CONFIG, val);
-}
-
-static void niu_init_xif_xmac(struct niu *np)
+static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
- struct niu_link_config *lp = &np->link_config;
- u64 val;
-
- val = nr64_mac(XMAC_CONFIG);
- val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
+ (void) niu_tx_channel_stop(np, rp->tx_channel);
+}
- val |= XMAC_CONFIG_TX_OUTPUT_EN;
+static void niu_stop_tx_channels(struct niu *np)
+{
+ int i;
- if (lp->loopback_mode == LOOPBACK_MAC) {
- val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
- val |= XMAC_CONFIG_LOOPBACK;
- } else {
- val &= ~XMAC_CONFIG_LOOPBACK;
- }
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
- if (np->flags & NIU_FLAGS_10G) {
- val &= ~XMAC_CONFIG_LFS_DISABLE;
- } else {
- val |= XMAC_CONFIG_LFS_DISABLE;
- if (!(np->flags & NIU_FLAGS_FIBER))
- val |= XMAC_CONFIG_1G_PCS_BYPASS;
- else
- val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
+ niu_stop_one_tx_channel(np, rp);
}
+}
- val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
+static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+ (void) niu_tx_channel_reset(np, rp->tx_channel);
+}
- if (lp->active_speed == SPEED_100)
- val |= XMAC_CONFIG_SEL_CLK_25MHZ;
- else
- val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
+static void niu_reset_tx_channels(struct niu *np)
+{
+ int i;
- nw64_mac(XMAC_CONFIG, val);
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
- val = nr64_mac(XMAC_CONFIG);
- val &= ~XMAC_CONFIG_MODE_MASK;
- if (np->flags & NIU_FLAGS_10G) {
- val |= XMAC_CONFIG_MODE_XGMII;
- } else {
- if (lp->active_speed == SPEED_100)
- val |= XMAC_CONFIG_MODE_MII;
- else
- val |= XMAC_CONFIG_MODE_GMII;
+ niu_reset_one_tx_channel(np, rp);
}
-
- nw64_mac(XMAC_CONFIG, val);
}
-static void niu_init_xif_bmac(struct niu *np)
+static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
- struct niu_link_config *lp = &np->link_config;
- u64 val;
-
- val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
-
- if (lp->loopback_mode == LOOPBACK_MAC)
- val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
- else
- val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
-
- if (lp->active_speed == SPEED_1000)
- val |= BMAC_XIF_CONFIG_GMII_MODE;
- else
- val &= ~BMAC_XIF_CONFIG_GMII_MODE;
+ (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
+}
- val &= ~(BMAC_XIF_CONFIG_LINK_LED |
- BMAC_XIF_CONFIG_LED_POLARITY);
+static void niu_stop_rx_channels(struct niu *np)
+{
+ int i;
- if (!(np->flags & NIU_FLAGS_10G) &&
- !(np->flags & NIU_FLAGS_FIBER) &&
- lp->active_speed == SPEED_100)
- val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
- else
- val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
- nw64_mac(BMAC_XIF_CONFIG, val);
+ niu_stop_one_rx_channel(np, rp);
+ }
}
-static void niu_init_xif(struct niu *np)
+static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
- if (np->flags & NIU_FLAGS_XMAC)
- niu_init_xif_xmac(np);
- else
- niu_init_xif_bmac(np);
+ int channel = rp->rx_channel;
+
+ (void) niu_rx_channel_reset(np, channel);
+ nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
+ nw64(RX_DMA_CTL_STAT(channel), 0);
+ (void) niu_enable_rx_channel(np, channel, 0);
}
-static void niu_pcs_mii_reset(struct niu *np)
+static void niu_reset_rx_channels(struct niu *np)
{
- u64 val = nr64_pcs(PCS_MII_CTL);
- val |= PCS_MII_CTL_RST;
- nw64_pcs(PCS_MII_CTL, val);
+ int i;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ niu_reset_one_rx_channel(np, rp);
+ }
}
-static void niu_xpcs_reset(struct niu *np)
+static void niu_disable_ipp(struct niu *np)
{
- u64 val = nr64_xpcs(XPCS_CONTROL1);
- val |= XPCS_CONTROL1_RESET;
- nw64_xpcs(XPCS_CONTROL1, val);
+ u64 rd, wr, val;
+ int limit;
+
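+ /* Wait for the DFIFO read pointer to catch up with the write
+ * pointer, indicating the IPP has drained, before disabling it.
+ */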
+ rd = nr64_ipp(IPP_DFIFO_RD_PTR);
+ wr = nr64_ipp(IPP_DFIFO_WR_PTR);
+ limit = 100;
+ while (--limit >= 0 && (rd != wr)) {
+ rd = nr64_ipp(IPP_DFIFO_RD_PTR);
+ wr = nr64_ipp(IPP_DFIFO_WR_PTR);
+ }
+ if (limit < 0 &&
+ (rd != 0 && wr != 1)) {
+ dev_err(np->device, PFX "%s: IPP would not quiesce, "
+ "rd_ptr[%llx] wr_ptr[%llx]\n",
+ np->dev->name,
+ (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
+ (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
+ }
+
+ val = nr64_ipp(IPP_CFIG);
+ val &= ~(IPP_CFIG_IPP_ENABLE |
+ IPP_CFIG_DFIFO_ECC_EN |
+ IPP_CFIG_DROP_BAD_CRC |
+ IPP_CFIG_CKSUM_EN);
+ nw64_ipp(IPP_CFIG, val);
+
+ (void) niu_ipp_reset(np);
}
-static int niu_init_pcs(struct niu *np)
+static int niu_init_hw(struct niu *np)
{
- struct niu_link_config *lp = &np->link_config;
- u64 val;
+ int i, err;
- switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) {
- case NIU_FLAGS_FIBER:
- /* 1G fiber */
- nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
- nw64_pcs(PCS_DPATH_MODE, 0);
- niu_pcs_mii_reset(np);
- break;
+ niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
+ niu_txc_enable_port(np, 1);
+ niu_txc_port_dma_enable(np, 1);
+ niu_txc_set_imask(np, 0);
- case NIU_FLAGS_10G:
- case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
- if (!(np->flags & NIU_FLAGS_XMAC))
- return -EINVAL;
+ niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
- /* 10G copper or fiber */
- val = nr64_mac(XMAC_CONFIG);
- val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
- nw64_mac(XMAC_CONFIG, val);
+ err = niu_init_one_tx_channel(np, rp);
+ if (err)
+ return err;
+ }
- niu_xpcs_reset(np);
+ niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
+ err = niu_init_rx_channels(np);
+ if (err)
+ goto out_uninit_tx_channels;
- val = nr64_xpcs(XPCS_CONTROL1);
- if (lp->loopback_mode == LOOPBACK_PHY)
- val |= XPCS_CONTROL1_LOOPBACK;
- else
- val &= ~XPCS_CONTROL1_LOOPBACK;
- nw64_xpcs(XPCS_CONTROL1, val);
+ niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
+ err = niu_init_classifier_hw(np);
+ if (err)
+ goto out_uninit_rx_channels;
- nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
- (void) nr64_xpcs(XPCS_SYMERR_CNT01);
- (void) nr64_xpcs(XPCS_SYMERR_CNT23);
- break;
+ niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
+ err = niu_init_zcp(np);
+ if (err)
+ goto out_uninit_rx_channels;
- case 0:
- /* 1G copper */
- nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
- niu_pcs_mii_reset(np);
- break;
+ niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
+ err = niu_init_ipp(np);
+ if (err)
+ goto out_uninit_rx_channels;
- default:
- return -EINVAL;
- }
+ niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
+ err = niu_init_mac(np);
+ if (err)
+ goto out_uninit_ipp;
return 0;
+
+out_uninit_ipp:
+ niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
+ niu_disable_ipp(np);
+
+out_uninit_rx_channels:
+ niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
+ niu_stop_rx_channels(np);
+ niu_reset_rx_channels(np);
+
+out_uninit_tx_channels:
+ niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
+ niu_stop_tx_channels(np);
+ niu_reset_tx_channels(np);
+
+ return err;
}
-static int niu_reset_tx_xmac(struct niu *np)
+static void niu_stop_hw(struct niu *np)
{
- return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
- (XTXMAC_SW_RST_REG_RS |
- XTXMAC_SW_RST_SOFT_RST),
- 1000, 100, "XTXMAC_SW_RST");
+ niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
+ niu_enable_interrupts(np, 0);
+
+ niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
+ niu_enable_rx_mac(np, 0);
+
+ niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
+ niu_disable_ipp(np);
+
+ niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
+ niu_stop_tx_channels(np);
+
+ niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
+ niu_stop_rx_channels(np);
+
+ niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
+ niu_reset_tx_channels(np);
+
+ niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
+ niu_reset_rx_channels(np);
}
-static int niu_reset_tx_bmac(struct niu *np)
+static void niu_set_irq_name(struct niu *np)
{
- int limit;
+ int port = np->port;
+ int i, j = 1;
- nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
- limit = 1000;
- while (--limit >= 0) {
- if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
- break;
- udelay(100);
- }
- if (limit < 0) {
- dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
- "BTXMAC_SW_RST[%llx]\n",
- np->port,
- (unsigned long long) nr64_mac(BTXMAC_SW_RST));
- return -ENODEV;
- }
+ sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
- return 0;
-}
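+ /* LDG 0 always services the MAC; port 0 additionally owns the
+ * MIF and SYSERR interrupts.
+ */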
+ if (port == 0) {
+ sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
+ sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
+ j = 3;
+ }
-static int niu_reset_tx_mac(struct niu *np)
-{
- if (np->flags & NIU_FLAGS_XMAC)
- return niu_reset_tx_xmac(np);
- else
- return niu_reset_tx_bmac(np);
+ for (i = 0; i < np->num_ldg - j; i++) {
+ if (i < np->num_rx_rings)
+ sprintf(np->irq_name[i+j], "%s-rx-%d",
+ np->dev->name, i);
+ else if (i < np->num_tx_rings + np->num_rx_rings)
+ sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
+ i - np->num_rx_rings);
+ }
}
-static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
+static int niu_request_irq(struct niu *np)
{
- u64 val;
+ int i, j, err;
- val = nr64_mac(XMAC_MIN);
- val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
- XMAC_MIN_RX_MIN_PKT_SIZE);
- val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
- val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
- nw64_mac(XMAC_MIN, val);
+ niu_set_irq_name(np);
- nw64_mac(XMAC_MAX, max);
+ err = 0;
+ for (i = 0; i < np->num_ldg; i++) {
+ struct niu_ldg *lp = &np->ldg[i];
- nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
+ err = request_irq(lp->irq, niu_interrupt,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ np->irq_name[i], lp);
+ if (err)
+ goto out_free_irqs;
- val = nr64_mac(XMAC_IPG);
- if (np->flags & NIU_FLAGS_10G) {
- val &= ~XMAC_IPG_IPG_XGMII;
- val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
- } else {
- val &= ~XMAC_IPG_IPG_MII_GMII;
- val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
}
- nw64_mac(XMAC_IPG, val);
- val = nr64_mac(XMAC_CONFIG);
- val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
- XMAC_CONFIG_STRETCH_MODE |
- XMAC_CONFIG_VAR_MIN_IPG_EN |
- XMAC_CONFIG_TX_ENABLE);
- nw64_mac(XMAC_CONFIG, val);
+ return 0;
- nw64_mac(TXMAC_FRM_CNT, 0);
- nw64_mac(TXMAC_BYTE_CNT, 0);
+out_free_irqs:
+ for (j = 0; j < i; j++) {
+ struct niu_ldg *lp = &np->ldg[j];
+
+ free_irq(lp->irq, lp);
+ }
+ return err;
}
-static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
+static void niu_free_irq(struct niu *np)
{
- u64 val;
-
- nw64_mac(BMAC_MIN_FRAME, min);
- nw64_mac(BMAC_MAX_FRAME, max);
+ int i;
- nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
- nw64_mac(BMAC_CTRL_TYPE, 0x8808);
- nw64_mac(BMAC_PREAMBLE_SIZE, 7);
+ for (i = 0; i < np->num_ldg; i++) {
+ struct niu_ldg *lp = &np->ldg[i];
- val = nr64_mac(BTXMAC_CONFIG);
- val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
- BTXMAC_CONFIG_ENABLE);
- nw64_mac(BTXMAC_CONFIG, val);
+ free_irq(lp->irq, lp);
+ }
}
-static void niu_init_tx_mac(struct niu *np)
+static void niu_enable_napi(struct niu *np)
{
- u64 min, max;
+ int i;
- min = 64;
- if (np->dev->mtu > ETH_DATA_LEN)
- max = 9216;
- else
- max = 1522;
+ for (i = 0; i < np->num_ldg; i++)
+ napi_enable(&np->ldg[i].napi);
+}
- /* The XMAC_MIN register only accepts values for TX min which
- * have the low 3 bits cleared.
- */
- BUILD_BUG_ON(min & 0x7);
+static void niu_disable_napi(struct niu *np)
+{
+ int i;
- if (np->flags & NIU_FLAGS_XMAC)
- niu_init_tx_xmac(np, min, max);
- else
- niu_init_tx_bmac(np, min, max);
+ for (i = 0; i < np->num_ldg; i++)
+ napi_disable(&np->ldg[i].napi);
}
-static int niu_reset_rx_xmac(struct niu *np)
+static int niu_open(struct net_device *dev)
{
- int limit;
+ struct niu *np = netdev_priv(dev);
+ int err;
- nw64_mac(XRXMAC_SW_RST,
- XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
- limit = 1000;
- while (--limit >= 0) {
- if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
- XRXMAC_SW_RST_SOFT_RST)))
- break;
- udelay(100);
- }
- if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
- "XRXMAC_SW_RST[%llx]\n",
- np->port,
- (unsigned long long) nr64_mac(XRXMAC_SW_RST));
- return -ENODEV;
- }
+ netif_carrier_off(dev);
- return 0;
-}
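+ /* Bring-up order: allocate channels, mask interrupts, grab the
+ * IRQs, then program the hardware under the lock and only then
+ * unmask interrupts and arm the timer.
+ */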
+ err = niu_alloc_channels(np);
+ if (err)
+ goto out_err;
-static int niu_reset_rx_bmac(struct niu *np)
-{
- int limit;
+ err = niu_enable_interrupts(np, 0);
+ if (err)
+ goto out_free_channels;
- nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
- limit = 1000;
- while (--limit >= 0) {
- if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
- break;
- udelay(100);
+ err = niu_request_irq(np);
+ if (err)
+ goto out_free_channels;
+
+ niu_enable_napi(np);
+
+ spin_lock_irq(&np->lock);
+
+ err = niu_init_hw(np);
+ if (!err) {
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + HZ;
+ np->timer.data = (unsigned long) np;
+ np->timer.function = niu_timer;
+
+ err = niu_enable_interrupts(np, 1);
+ if (err)
+ niu_stop_hw(np);
}
- if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
- "BRXMAC_SW_RST[%llx]\n",
- np->port,
- (unsigned long long) nr64_mac(BRXMAC_SW_RST));
- return -ENODEV;
+
+ spin_unlock_irq(&np->lock);
+
+ if (err) {
+ niu_disable_napi(np);
+ goto out_free_irq;
}
- return 0;
-}
+ netif_tx_start_all_queues(dev);
-static int niu_reset_rx_mac(struct niu *np)
-{
- if (np->flags & NIU_FLAGS_XMAC)
- return niu_reset_rx_xmac(np);
- else
- return niu_reset_rx_bmac(np);
-}
+ if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
+ netif_carrier_on(dev);
-static void niu_init_rx_xmac(struct niu *np)
-{
- struct niu_parent *parent = np->parent;
- struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
- int first_rdc_table = tp->first_table_num;
- unsigned long i;
- u64 val;
+ add_timer(&np->timer);
- nw64_mac(XMAC_ADD_FILT0, 0);
- nw64_mac(XMAC_ADD_FILT1, 0);
- nw64_mac(XMAC_ADD_FILT2, 0);
- nw64_mac(XMAC_ADD_FILT12_MASK, 0);
- nw64_mac(XMAC_ADD_FILT00_MASK, 0);
- for (i = 0; i < MAC_NUM_HASH; i++)
- nw64_mac(XMAC_HASH_TBL(i), 0);
- nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
- niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
- niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+ return 0;
- val = nr64_mac(XMAC_CONFIG);
- val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
- XMAC_CONFIG_PROMISCUOUS |
- XMAC_CONFIG_PROMISC_GROUP |
- XMAC_CONFIG_ERR_CHK_DIS |
- XMAC_CONFIG_RX_CRC_CHK_DIS |
- XMAC_CONFIG_RESERVED_MULTICAST |
- XMAC_CONFIG_RX_CODEV_CHK_DIS |
- XMAC_CONFIG_ADDR_FILTER_EN |
- XMAC_CONFIG_RCV_PAUSE_ENABLE |
- XMAC_CONFIG_STRIP_CRC |
- XMAC_CONFIG_PASS_FLOW_CTRL |
- XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
- val |= (XMAC_CONFIG_HASH_FILTER_EN);
- nw64_mac(XMAC_CONFIG, val);
+out_free_irq:
+ niu_free_irq(np);
- nw64_mac(RXMAC_BT_CNT, 0);
- nw64_mac(RXMAC_BC_FRM_CNT, 0);
- nw64_mac(RXMAC_MC_FRM_CNT, 0);
- nw64_mac(RXMAC_FRAG_CNT, 0);
- nw64_mac(RXMAC_HIST_CNT1, 0);
- nw64_mac(RXMAC_HIST_CNT2, 0);
- nw64_mac(RXMAC_HIST_CNT3, 0);
- nw64_mac(RXMAC_HIST_CNT4, 0);
- nw64_mac(RXMAC_HIST_CNT5, 0);
- nw64_mac(RXMAC_HIST_CNT6, 0);
- nw64_mac(RXMAC_HIST_CNT7, 0);
- nw64_mac(RXMAC_MPSZER_CNT, 0);
- nw64_mac(RXMAC_CRC_ER_CNT, 0);
- nw64_mac(RXMAC_CD_VIO_CNT, 0);
- nw64_mac(LINK_FAULT_CNT, 0);
+out_free_channels:
+ niu_free_channels(np);
+
+out_err:
+ return err;
}
-static void niu_init_rx_bmac(struct niu *np)
-{
- struct niu_parent *parent = np->parent;
- struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
- int first_rdc_table = tp->first_table_num;
- unsigned long i;
- u64 val;
+static void niu_full_shutdown(struct niu *np, struct net_device *dev)
+{
+ cancel_work_sync(&np->reset_task);
+
+ niu_disable_napi(np);
+ netif_tx_stop_all_queues(dev);
+
+ del_timer_sync(&np->timer);
- nw64_mac(BMAC_ADD_FILT0, 0);
- nw64_mac(BMAC_ADD_FILT1, 0);
- nw64_mac(BMAC_ADD_FILT2, 0);
- nw64_mac(BMAC_ADD_FILT12_MASK, 0);
- nw64_mac(BMAC_ADD_FILT00_MASK, 0);
- for (i = 0; i < MAC_NUM_HASH; i++)
- nw64_mac(BMAC_HASH_TBL(i), 0);
- niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
- niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
- nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
+ spin_lock_irq(&np->lock);
- val = nr64_mac(BRXMAC_CONFIG);
- val &= ~(BRXMAC_CONFIG_ENABLE |
- BRXMAC_CONFIG_STRIP_PAD |
- BRXMAC_CONFIG_STRIP_FCS |
- BRXMAC_CONFIG_PROMISC |
- BRXMAC_CONFIG_PROMISC_GRP |
- BRXMAC_CONFIG_ADDR_FILT_EN |
- BRXMAC_CONFIG_DISCARD_DIS);
- val |= (BRXMAC_CONFIG_HASH_FILT_EN);
- nw64_mac(BRXMAC_CONFIG, val);
+ niu_stop_hw(np);
- val = nr64_mac(BMAC_ADDR_CMPEN);
- val |= BMAC_ADDR_CMPEN_EN0;
- nw64_mac(BMAC_ADDR_CMPEN, val);
+ spin_unlock_irq(&np->lock);
}
-static void niu_init_rx_mac(struct niu *np)
+static int niu_close(struct net_device *dev)
{
- niu_set_primary_mac(np, np->dev->dev_addr);
+ struct niu *np = netdev_priv(dev);
- if (np->flags & NIU_FLAGS_XMAC)
- niu_init_rx_xmac(np);
- else
- niu_init_rx_bmac(np);
+ niu_full_shutdown(np, dev);
+
+ niu_free_irq(np);
+
+ niu_free_channels(np);
+
+ niu_handle_led(np, 0);
+
+ return 0;
}
-static void niu_enable_tx_xmac(struct niu *np, int on)
+static void niu_sync_xmac_stats(struct niu *np)
{
- u64 val = nr64_mac(XMAC_CONFIG);
+ struct niu_xmac_stats *mp = &np->mac_stats.xmac;
- if (on)
- val |= XMAC_CONFIG_TX_ENABLE;
- else
- val &= ~XMAC_CONFIG_TX_ENABLE;
- nw64_mac(XMAC_CONFIG, val);
+ mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
+ mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
+
+ mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
+ mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
+ mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
+ mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
+ mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
+ mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
+ mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
+ mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
+ mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
+ mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
+ mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
+ mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
+ mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
+ mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
+ mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
+ mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}
-static void niu_enable_tx_bmac(struct niu *np, int on)
+static void niu_sync_bmac_stats(struct niu *np)
{
- u64 val = nr64_mac(BTXMAC_CONFIG);
+ struct niu_bmac_stats *mp = &np->mac_stats.bmac;
- if (on)
- val |= BTXMAC_CONFIG_ENABLE;
- else
- val &= ~BTXMAC_CONFIG_ENABLE;
- nw64_mac(BTXMAC_CONFIG, val);
+ mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
+ mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
+
+ mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
+ mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
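+ /* XXX rx_crc_errors below is also accumulated from the
+ * alignment error counter.
+ */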
+ mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
+ mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}
-static void niu_enable_tx_mac(struct niu *np, int on)
+static void niu_sync_mac_stats(struct niu *np)
{
if (np->flags & NIU_FLAGS_XMAC)
- niu_enable_tx_xmac(np, on);
+ niu_sync_xmac_stats(np);
else
- niu_enable_tx_bmac(np, on);
+ niu_sync_bmac_stats(np);
}
-static void niu_enable_rx_xmac(struct niu *np, int on)
+static void niu_get_rx_stats(struct niu *np)
{
- u64 val = nr64_mac(XMAC_CONFIG);
+ unsigned long pkts, dropped, errors, bytes;
+ int i;
- val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
- XMAC_CONFIG_PROMISCUOUS);
+ pkts = dropped = errors = bytes = 0;
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
- if (np->flags & NIU_FLAGS_MCAST)
- val |= XMAC_CONFIG_HASH_FILTER_EN;
- if (np->flags & NIU_FLAGS_PROMISC)
- val |= XMAC_CONFIG_PROMISCUOUS;
+ niu_sync_rx_discard_stats(np, rp, 0);
- if (on)
- val |= XMAC_CONFIG_RX_MAC_ENABLE;
- else
- val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
- nw64_mac(XMAC_CONFIG, val);
+ pkts += rp->rx_packets;
+ bytes += rp->rx_bytes;
+ dropped += rp->rx_dropped;
+ errors += rp->rx_errors;
+ }
+ np->dev->stats.rx_packets = pkts;
+ np->dev->stats.rx_bytes = bytes;
+ np->dev->stats.rx_dropped = dropped;
+ np->dev->stats.rx_errors = errors;
}
-static void niu_enable_rx_bmac(struct niu *np, int on)
+static void niu_get_tx_stats(struct niu *np)
{
- u64 val = nr64_mac(BRXMAC_CONFIG);
+ unsigned long pkts, errors, bytes;
+ int i;
- val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
- BRXMAC_CONFIG_PROMISC);
+ pkts = errors = bytes = 0;
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
- if (np->flags & NIU_FLAGS_MCAST)
- val |= BRXMAC_CONFIG_HASH_FILT_EN;
- if (np->flags & NIU_FLAGS_PROMISC)
- val |= BRXMAC_CONFIG_PROMISC;
+ pkts += rp->tx_packets;
+ bytes += rp->tx_bytes;
+ errors += rp->tx_errors;
+ }
+ np->dev->stats.tx_packets = pkts;
+ np->dev->stats.tx_bytes = bytes;
+ np->dev->stats.tx_errors = errors;
+}
- if (on)
- val |= BRXMAC_CONFIG_ENABLE;
- else
- val &= ~BRXMAC_CONFIG_ENABLE;
- nw64_mac(BRXMAC_CONFIG, val);
+static struct net_device_stats *niu_get_stats(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ niu_get_rx_stats(np);
+ niu_get_tx_stats(np);
+
+ return &dev->stats;
}
-static void niu_enable_rx_mac(struct niu *np, int on)
+static void niu_load_hash_xmac(struct niu *np, u16 *hash)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ nw64_mac(XMAC_HASH_TBL(i), hash[i]);
+}
+
+static void niu_load_hash_bmac(struct niu *np, u16 *hash)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ nw64_mac(BMAC_HASH_TBL(i), hash[i]);
+}
+
+static void niu_load_hash(struct niu *np, u16 *hash)
{
if (np->flags & NIU_FLAGS_XMAC)
- niu_enable_rx_xmac(np, on);
+ niu_load_hash_xmac(np, hash);
else
- niu_enable_rx_bmac(np, on);
+ niu_load_hash_bmac(np, hash);
}
-static int niu_init_mac(struct niu *np)
+static void niu_set_rx_mode(struct net_device *dev)
{
- int err;
+ struct niu *np = netdev_priv(dev);
+ int i, alt_cnt, err;
+ struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
+ unsigned long flags;
+ u16 hash[16] = { 0, };
- niu_init_xif(np);
- err = niu_init_pcs(np);
- if (err)
- return err;
+ spin_lock_irqsave(&np->lock, flags);
+ niu_enable_rx_mac(np, 0);
- err = niu_reset_tx_mac(np);
- if (err)
- return err;
- niu_init_tx_mac(np);
- err = niu_reset_rx_mac(np);
- if (err)
- return err;
- niu_init_rx_mac(np);
+ np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
+ if (dev->flags & IFF_PROMISC)
+ np->flags |= NIU_FLAGS_PROMISC;
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
+ np->flags |= NIU_FLAGS_MCAST;
- /* This looks hookey but the RX MAC reset we just did will
- * undo some of the state we setup in niu_init_tx_mac() so we
- * have to call it again. In particular, the RX MAC reset will
- * set the XMAC_MAX register back to it's default value.
- */
- niu_init_tx_mac(np);
- niu_enable_tx_mac(np, 1);
+ alt_cnt = dev->uc.count;
+ if (alt_cnt > niu_num_alt_addr(np)) {
+ alt_cnt = 0;
+ np->flags |= NIU_FLAGS_PROMISC;
+ }
- niu_enable_rx_mac(np, 1);
+ if (alt_cnt) {
+ int index = 0;
- return 0;
-}
+ list_for_each_entry(ha, &dev->uc.list, list) {
+ err = niu_set_alt_mac(np, index, ha->addr);
+ if (err)
+ printk(KERN_WARNING PFX "%s: Error %d "
+ "adding alt mac %d\n",
+ dev->name, err, index);
+ err = niu_enable_alt_mac(np, index, 1);
+ if (err)
+ printk(KERN_WARNING PFX "%s: Error %d "
+ "enabling alt mac %d\n",
+ dev->name, err, index);
+
+ index++;
+ }
+ } else {
+ int alt_start;
+ if (np->flags & NIU_FLAGS_XMAC)
+ alt_start = 0;
+ else
+ alt_start = 1;
+ for (i = alt_start; i < niu_num_alt_addr(np); i++) {
+ err = niu_enable_alt_mac(np, i, 0);
+ if (err)
+ printk(KERN_WARNING PFX "%s: Error %d "
+ "disabling alt mac %d\n",
+ dev->name, err, i);
+ }
+ }
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 16; i++)
+ hash[i] = 0xffff;
+ } else if (dev->mc_count > 0) {
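+ /* The top byte of the little-endian CRC picks the hash bit:
+ * high nibble selects one of the 16 registers, low nibble the
+ * bit within it.
+ */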
+ for (addr = dev->mc_list; addr; addr = addr->next) {
+ u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
+
+ crc >>= 24;
+ hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
+ }
+ }
+
+ if (np->flags & NIU_FLAGS_MCAST)
+ niu_load_hash(np, hash);
-static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
-{
- (void) niu_tx_channel_stop(np, rp->tx_channel);
+ niu_enable_rx_mac(np, 1);
+ spin_unlock_irqrestore(&np->lock, flags);
}
-static void niu_stop_tx_channels(struct niu *np)
+static int niu_set_mac_addr(struct net_device *dev, void *p)
{
- int i;
-
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ struct niu *np = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned long flags;
- niu_stop_one_tx_channel(np, rp);
- }
-}
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
-static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
-{
- (void) niu_tx_channel_reset(np, rp->tx_channel);
-}
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-static void niu_reset_tx_channels(struct niu *np)
-{
- int i;
+ if (!netif_running(dev))
+ return 0;
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ spin_lock_irqsave(&np->lock, flags);
+ niu_enable_rx_mac(np, 0);
+ niu_set_primary_mac(np, dev->dev_addr);
+ niu_enable_rx_mac(np, 1);
+ spin_unlock_irqrestore(&np->lock, flags);
- niu_reset_one_tx_channel(np, rp);
- }
+ return 0;
}
-static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
+ return -EOPNOTSUPP;
}
-static void niu_stop_rx_channels(struct niu *np)
+static void niu_netif_stop(struct niu *np)
{
- int i;
+ np->dev->trans_start = jiffies; /* prevent tx timeout */
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ niu_disable_napi(np);
- niu_stop_one_rx_channel(np, rp);
- }
+ netif_tx_disable(np->dev);
}
-static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+static void niu_netif_start(struct niu *np)
{
- int channel = rp->rx_channel;
+ /* NOTE: unconditionally waking all TX queues is only appropriate
+ * so long as all callers are assured to have free tx slots
+ * (such as after niu_init_hw).
+ */
+ netif_tx_wake_all_queues(np->dev);
- (void) niu_rx_channel_reset(np, channel);
- nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
- nw64(RX_DMA_CTL_STAT(channel), 0);
- (void) niu_enable_rx_channel(np, channel, 0);
+ niu_enable_napi(np);
+
+ niu_enable_interrupts(np, 1);
}
-static void niu_reset_rx_channels(struct niu *np)
+static void niu_reset_buffers(struct niu *np)
{
- int i;
-
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ int i, j, k, err;
- niu_reset_one_rx_channel(np, rp);
- }
-}
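+ /* Rebuild each RBR from the pages still threaded through the
+ * rxhash, then top the ring up with freshly allocated pages.
+ */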
+ if (np->rx_rings) {
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
-static void niu_disable_ipp(struct niu *np)
-{
- u64 rd, wr, val;
- int limit;
+ for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
+ struct page *page;
+
+ page = rp->rxhash[j];
+ while (page) {
+ struct page *next =
+ (struct page *) page->mapping;
+ u64 base = page->index;
+ base = base >> RBR_DESCR_ADDR_SHIFT;
+ rp->rbr[k++] = cpu_to_le32(base);
+ page = next;
+ }
+ }
+ for (; k < MAX_RBR_RING_SIZE; k++) {
+ err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
+ if (unlikely(err))
+ break;
+ }
- rd = nr64_ipp(IPP_DFIFO_RD_PTR);
- wr = nr64_ipp(IPP_DFIFO_WR_PTR);
- limit = 100;
- while (--limit >= 0 && (rd != wr)) {
- rd = nr64_ipp(IPP_DFIFO_RD_PTR);
- wr = nr64_ipp(IPP_DFIFO_WR_PTR);
- }
- if (limit < 0 &&
- (rd != 0 && wr != 1)) {
- dev_err(np->device, PFX "%s: IPP would not quiesce, "
- "rd_ptr[%llx] wr_ptr[%llx]\n",
- np->dev->name,
- (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
- (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
+ rp->rbr_index = rp->rbr_table_size - 1;
+ rp->rcr_index = 0;
+ rp->rbr_pending = 0;
+ rp->rbr_refill_pending = 0;
+ }
}
+ if (np->tx_rings) {
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
- val = nr64_ipp(IPP_CFIG);
- val &= ~(IPP_CFIG_IPP_ENABLE |
- IPP_CFIG_DFIFO_ECC_EN |
- IPP_CFIG_DROP_BAD_CRC |
- IPP_CFIG_CKSUM_EN);
- nw64_ipp(IPP_CFIG, val);
+ for (j = 0; j < MAX_TX_RING_SIZE; j++) {
+ if (rp->tx_buffs[j].skb)
+ (void) release_tx_packet(np, rp, j);
+ }
- (void) niu_ipp_reset(np);
+ rp->pending = MAX_TX_RING_SIZE;
+ rp->prod = 0;
+ rp->cons = 0;
+ rp->wrap_bit = 0;
+ }
+ }
}
-static int niu_init_hw(struct niu *np)
+static void niu_reset_task(struct work_struct *work)
{
- int i, err;
-
- niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
- niu_txc_enable_port(np, 1);
- niu_txc_port_dma_enable(np, 1);
- niu_txc_set_imask(np, 0);
-
- niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ struct niu *np = container_of(work, struct niu, reset_task);
+ unsigned long flags;
+ int err;
- err = niu_init_one_tx_channel(np, rp);
- if (err)
- return err;
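+ /* Bail out if the device was downed while the reset was queued. */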
+ spin_lock_irqsave(&np->lock, flags);
+ if (!netif_running(np->dev)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ return;
}
- niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
- err = niu_init_rx_channels(np);
- if (err)
- goto out_uninit_tx_channels;
+ spin_unlock_irqrestore(&np->lock, flags);
- niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
- err = niu_init_classifier_hw(np);
- if (err)
- goto out_uninit_rx_channels;
+ del_timer_sync(&np->timer);
- niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
- err = niu_init_zcp(np);
- if (err)
- goto out_uninit_rx_channels;
+ niu_netif_stop(np);
- niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
- err = niu_init_ipp(np);
- if (err)
- goto out_uninit_rx_channels;
+ spin_lock_irqsave(&np->lock, flags);
- niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
- err = niu_init_mac(np);
- if (err)
- goto out_uninit_ipp;
+ niu_stop_hw(np);
- return 0;
+ spin_unlock_irqrestore(&np->lock, flags);
-out_uninit_ipp:
- niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
- niu_disable_ipp(np);
+ niu_reset_buffers(np);
-out_uninit_rx_channels:
- niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
- niu_stop_rx_channels(np);
- niu_reset_rx_channels(np);
+ spin_lock_irqsave(&np->lock, flags);
-out_uninit_tx_channels:
- niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
- niu_stop_tx_channels(np);
- niu_reset_tx_channels(np);
+ err = niu_init_hw(np);
+ if (!err) {
+ np->timer.expires = jiffies + HZ;
+ add_timer(&np->timer);
+ niu_netif_start(np);
+ }
- return err;
+ spin_unlock_irqrestore(&np->lock, flags);
}
-static void niu_stop_hw(struct niu *np)
+static void niu_tx_timeout(struct net_device *dev)
{
- niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
- niu_enable_interrupts(np, 0);
-
- niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
- niu_enable_rx_mac(np, 0);
-
- niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
- niu_disable_ipp(np);
+ struct niu *np = netdev_priv(dev);
- niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
- niu_stop_tx_channels(np);
+ dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
+ dev->name);
- niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
- niu_stop_rx_channels(np);
+ schedule_work(&np->reset_task);
+}
- niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
- niu_reset_tx_channels(np);
+static void niu_set_txd(struct tx_ring_info *rp, int index,
+ u64 mapping, u64 len, u64 mark,
+ u64 n_frags)
+{
+ __le64 *desc = &rp->descr[index];
- niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
- niu_reset_rx_channels(np);
+ *desc = cpu_to_le64(mark |
+ (n_frags << TX_DESC_NUM_PTR_SHIFT) |
+ (len << TX_DESC_TR_LEN_SHIFT) |
+ (mapping & TX_DESC_SAD));
}
-static int niu_request_irq(struct niu *np)
+static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
+ u64 pad_bytes, u64 len)
{
- int i, j, err;
+ u16 eth_proto, eth_proto_inner;
+ u64 csum_bits, l3off, ihl, ret;
+ u8 ip_proto;
+ int ipv6;
- err = 0;
- for (i = 0; i < np->num_ldg; i++) {
- struct niu_ldg *lp = &np->ldg[i];
+ eth_proto = be16_to_cpu(ehdr->h_proto);
+ eth_proto_inner = eth_proto;
+ if (eth_proto == ETH_P_8021Q) {
+ struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
+ __be16 val = vp->h_vlan_encapsulated_proto;
+
+ eth_proto_inner = be16_to_cpu(val);
+ }
+
+ ipv6 = ihl = 0;
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ ip_proto = ip_hdr(skb)->protocol;
+ ihl = ip_hdr(skb)->ihl;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
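+ /* IPv6 headers are a fixed 40 bytes; ihl is expressed in 4-byte units. */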
+ ihl = (40 >> 2);
+ ipv6 = 1;
+ break;
+ default:
+ ip_proto = ihl = 0;
+ break;
+ }
+
+ csum_bits = TXHDR_CSUM_NONE;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u64 start, stuff;
+
+ csum_bits = (ip_proto == IPPROTO_TCP ?
+ TXHDR_CSUM_TCP :
+ (ip_proto == IPPROTO_UDP ?
+ TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
- err = request_irq(lp->irq, niu_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- np->dev->name, lp);
- if (err)
- goto out_free_irqs;
+ start = skb_transport_offset(skb) -
+ (pad_bytes + sizeof(struct tx_pkt_hdr));
+ stuff = start + skb->csum_offset;
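+ /* The hardware takes the L4 start/stuff offsets in 2-byte units. */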
+ csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
+ csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
}
- return 0;
+ l3off = skb_network_offset(skb) -
+ (pad_bytes + sizeof(struct tx_pkt_hdr));
-out_free_irqs:
- for (j = 0; j < i; j++) {
- struct niu_ldg *lp = &np->ldg[j];
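+ /* Ethertype values below 1536 are 802.3 length fields, so flag LLC framing. */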
+ ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
+ (len << TXHDR_LEN_SHIFT) |
+ ((l3off / 2) << TXHDR_L3START_SHIFT) |
+ (ihl << TXHDR_IHL_SHIFT) |
+ ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
+ ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
+ (ipv6 ? TXHDR_IP_VER : 0) |
+ csum_bits);
- free_irq(lp->irq, lp);
- }
- return err;
+ return ret;
}
-static void niu_free_irq(struct niu *np)
+static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- int i;
+ struct niu *np = netdev_priv(dev);
+ unsigned long align, headroom;
+ struct netdev_queue *txq;
+ struct tx_ring_info *rp;
+ struct tx_pkt_hdr *tp;
+ unsigned int len, nfg;
+ struct ethhdr *ehdr;
+ int prod, i, tlen;
+ u64 mapping, mrk;
- for (i = 0; i < np->num_ldg; i++) {
- struct niu_ldg *lp = &np->ldg[i];
+ i = skb_get_queue_mapping(skb);
+ rp = &np->tx_rings[i];
+ txq = netdev_get_tx_queue(dev, i);
- free_irq(lp->irq, lp);
+ if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_tx_stop_queue(txq);
+ dev_err(np->device, PFX "%s: BUG! Tx ring full when "
+ "queue awake!\n", dev->name);
+ rp->tx_errors++;
+ return NETDEV_TX_BUSY;
}
-}
-
-static void niu_enable_napi(struct niu *np)
-{
- int i;
-
- for (i = 0; i < np->num_ldg; i++)
- napi_enable(&np->ldg[i].napi);
-}
-static void niu_disable_napi(struct niu *np)
-{
- int i;
+ if (skb->len < ETH_ZLEN) {
+ unsigned int pad_bytes = ETH_ZLEN - skb->len;
- for (i = 0; i < np->num_ldg; i++)
- napi_disable(&np->ldg[i].napi);
-}
+ if (skb_pad(skb, pad_bytes))
+ goto out;
+ skb_put(skb, pad_bytes);
+ }
-static int niu_open(struct net_device *dev)
-{
- struct niu *np = netdev_priv(dev);
- int err;
+ len = sizeof(struct tx_pkt_hdr) + 15;
+ if (skb_headroom(skb) < len) {
+ struct sk_buff *skb_new;
- netif_carrier_off(dev);
+ skb_new = skb_realloc_headroom(skb, len);
+ if (!skb_new) {
+ rp->tx_errors++;
+ goto out_drop;
+ }
+ kfree_skb(skb);
+ skb = skb_new;
+ } else
+ skb_orphan(skb);
- err = niu_alloc_channels(np);
- if (err)
- goto out_err;
+ align = ((unsigned long) skb->data & (16 - 1));
+ headroom = align + sizeof(struct tx_pkt_hdr);
- err = niu_enable_interrupts(np, 0);
- if (err)
- goto out_free_channels;
+ ehdr = (struct ethhdr *) skb->data;
+ tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
- err = niu_request_irq(np);
- if (err)
- goto out_free_channels;
+ len = skb->len - sizeof(struct tx_pkt_hdr);
+ tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
+ tp->resv = 0;
- niu_enable_napi(np);
+ len = skb_headlen(skb);
+ mapping = np->ops->map_single(np->device, skb->data,
+ len, DMA_TO_DEVICE);
- spin_lock_irq(&np->lock);
+ prod = rp->prod;
- err = niu_init_hw(np);
- if (!err) {
- init_timer(&np->timer);
- np->timer.expires = jiffies + HZ;
- np->timer.data = (unsigned long) np;
- np->timer.function = niu_timer;
+ rp->tx_buffs[prod].skb = skb;
+ rp->tx_buffs[prod].mapping = mapping;
- err = niu_enable_interrupts(np, 1);
- if (err)
- niu_stop_hw(np);
+ mrk = TX_DESC_SOP;
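+ /* Request a completion mark once every mark_freq packets. */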
+ if (++rp->mark_counter == rp->mark_freq) {
+ rp->mark_counter = 0;
+ mrk |= TX_DESC_MARK;
+ rp->mark_pending++;
}
- spin_unlock_irq(&np->lock);
-
- if (err) {
- niu_disable_napi(np);
- goto out_free_irq;
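+ /* Descriptor count: one per MAX_TX_DESC_LEN chunk of the linear area, plus the page fragments. */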
+ tlen = len;
+ nfg = skb_shinfo(skb)->nr_frags;
+ while (tlen > 0) {
+ tlen -= MAX_TX_DESC_LEN;
+ nfg++;
}
- netif_start_queue(dev);
+ while (len > 0) {
+ unsigned int this_len = len;
- if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
- netif_carrier_on(dev);
+ if (this_len > MAX_TX_DESC_LEN)
+ this_len = MAX_TX_DESC_LEN;
- add_timer(&np->timer);
+ niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
+ mrk = nfg = 0;
- return 0;
+ prod = NEXT_TX(rp, prod);
+ mapping += this_len;
+ len -= this_len;
+ }
-out_free_irq:
- niu_free_irq(np);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-out_free_channels:
- niu_free_channels(np);
+ len = frag->size;
+ mapping = np->ops->map_page(np->device, frag->page,
+ frag->page_offset, len,
+ DMA_TO_DEVICE);
-out_err:
- return err;
-}
+ rp->tx_buffs[prod].skb = NULL;
+ rp->tx_buffs[prod].mapping = mapping;
-static void niu_full_shutdown(struct niu *np, struct net_device *dev)
-{
- cancel_work_sync(&np->reset_task);
+ niu_set_txd(rp, prod, mapping, len, 0, 0);
- niu_disable_napi(np);
- netif_stop_queue(dev);
+ prod = NEXT_TX(rp, prod);
+ }
- del_timer_sync(&np->timer);
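+ /* The kick register's wrap bit must toggle each time the producer index wraps. */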
+ if (prod < rp->prod)
+ rp->wrap_bit ^= TX_RING_KICK_WRAP;
+ rp->prod = prod;
- spin_lock_irq(&np->lock);
+ nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
- niu_stop_hw(np);
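+ /* Stop the queue when nearly full; re-check to close the race with a concurrent completion. */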
+ if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(txq);
+ if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
+ netif_tx_wake_queue(txq);
+ }
- spin_unlock_irq(&np->lock);
+out:
+ return NETDEV_TX_OK;
+
+out_drop:
+ rp->tx_errors++;
+ kfree_skb(skb);
+ goto out;
}
-static int niu_close(struct net_device *dev)
+static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
struct niu *np = netdev_priv(dev);
+ int err, orig_jumbo, new_jumbo;
- niu_full_shutdown(np, dev);
-
- niu_free_irq(np);
-
- niu_free_channels(np);
-
- niu_handle_led(np, 0);
-
- return 0;
-}
-
-static void niu_sync_xmac_stats(struct niu *np)
-{
- struct niu_xmac_stats *mp = &np->mac_stats.xmac;
+ if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
+ return -EINVAL;
- mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
- mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
+ orig_jumbo = (dev->mtu > ETH_DATA_LEN);
+ new_jumbo = (new_mtu > ETH_DATA_LEN);
- mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
- mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
- mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
- mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
- mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
- mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
- mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
- mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
- mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
- mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
- mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
- mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
- mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
- mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
- mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
- mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
-}
+ dev->mtu = new_mtu;
-static void niu_sync_bmac_stats(struct niu *np)
-{
- struct niu_bmac_stats *mp = &np->mac_stats.bmac;
+ if (!netif_running(dev) ||
+ (orig_jumbo == new_jumbo))
+ return 0;
- mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
- mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
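+ /* Crossing the jumbo threshold changes buffer geometry, so tear down and rebuild the channels. */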
+ niu_full_shutdown(np, dev);
- mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
- mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
- mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
- mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
-}
+ niu_free_channels(np);
-static void niu_sync_mac_stats(struct niu *np)
-{
- if (np->flags & NIU_FLAGS_XMAC)
- niu_sync_xmac_stats(np);
- else
- niu_sync_bmac_stats(np);
-}
+ niu_enable_napi(np);
-static void niu_get_rx_stats(struct niu *np)
-{
- unsigned long pkts, dropped, errors, bytes;
- int i;
+ err = niu_alloc_channels(np);
+ if (err)
+ return err;
- pkts = dropped = errors = bytes = 0;
- for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ spin_lock_irq(&np->lock);
- pkts += rp->rx_packets;
- bytes += rp->rx_bytes;
- dropped += rp->rx_dropped;
- errors += rp->rx_errors;
+ err = niu_init_hw(np);
+ if (!err) {
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + HZ;
+ np->timer.data = (unsigned long) np;
+ np->timer.function = niu_timer;
+
+ err = niu_enable_interrupts(np, 1);
+ if (err)
+ niu_stop_hw(np);
}
- np->net_stats.rx_packets = pkts;
- np->net_stats.rx_bytes = bytes;
- np->net_stats.rx_dropped = dropped;
- np->net_stats.rx_errors = errors;
-}
-static void niu_get_tx_stats(struct niu *np)
-{
- unsigned long pkts, errors, bytes;
- int i;
+ spin_unlock_irq(&np->lock);
- pkts = errors = bytes = 0;
- for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ if (!err) {
+ netif_tx_start_all_queues(dev);
+ if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
+ netif_carrier_on(dev);
- pkts += rp->tx_packets;
- bytes += rp->tx_bytes;
- errors += rp->tx_errors;
+ add_timer(&np->timer);
}
- np->net_stats.tx_packets = pkts;
- np->net_stats.tx_bytes = bytes;
- np->net_stats.tx_errors = errors;
+
+ return err;
}
-static struct net_device_stats *niu_get_stats(struct net_device *dev)
+static void niu_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct niu *np = netdev_priv(dev);
+ struct niu_vpd *vpd = &np->vpd;
- niu_get_rx_stats(np);
- niu_get_tx_stats(np);
-
- return &np->net_stats;
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+ sprintf(info->fw_version, "%d.%d",
+ vpd->fcode_major, vpd->fcode_minor);
+ if (np->parent->plat_type != PLAT_TYPE_NIU)
+ strcpy(info->bus_info, pci_name(np->pdev));
}
-static void niu_load_hash_xmac(struct niu *np, u16 *hash)
+static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- int i;
+ struct niu *np = netdev_priv(dev);
+ struct niu_link_config *lp;
- for (i = 0; i < 16; i++)
- nw64_mac(XMAC_HASH_TBL(i), hash[i]);
+ lp = &np->link_config;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->phy_address = np->phy_addr;
+ cmd->supported = lp->supported;
+ cmd->advertising = lp->active_advertising;
+ cmd->autoneg = lp->active_autoneg;
+ cmd->speed = lp->active_speed;
+ cmd->duplex = lp->active_duplex;
+ cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
+ cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
+
+ return 0;
}
-static void niu_load_hash_bmac(struct niu *np, u16 *hash)
+static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- int i;
+ struct niu *np = netdev_priv(dev);
+ struct niu_link_config *lp = &np->link_config;
- for (i = 0; i < 16; i++)
- nw64_mac(BMAC_HASH_TBL(i), hash[i]);
+ lp->advertising = cmd->advertising;
+ lp->speed = cmd->speed;
+ lp->duplex = cmd->duplex;
+ lp->autoneg = cmd->autoneg;
+ return niu_init_link(np);
}
-static void niu_load_hash(struct niu *np, u16 *hash)
+static u32 niu_get_msglevel(struct net_device *dev)
{
- if (np->flags & NIU_FLAGS_XMAC)
- niu_load_hash_xmac(np, hash);
- else
- niu_load_hash_bmac(np, hash);
+ struct niu *np = netdev_priv(dev);
+ return np->msg_enable;
}
-static void niu_set_rx_mode(struct net_device *dev)
+static void niu_set_msglevel(struct net_device *dev, u32 value)
{
struct niu *np = netdev_priv(dev);
- int i, alt_cnt, err;
- struct dev_addr_list *addr;
- unsigned long flags;
- u16 hash[16] = { 0, };
-
- spin_lock_irqsave(&np->lock, flags);
- niu_enable_rx_mac(np, 0);
-
- np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
- if (dev->flags & IFF_PROMISC)
- np->flags |= NIU_FLAGS_PROMISC;
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
- np->flags |= NIU_FLAGS_MCAST;
-
- alt_cnt = dev->uc_count;
- if (alt_cnt > niu_num_alt_addr(np)) {
- alt_cnt = 0;
- np->flags |= NIU_FLAGS_PROMISC;
- }
-
- if (alt_cnt) {
- int index = 0;
-
- for (addr = dev->uc_list; addr; addr = addr->next) {
- err = niu_set_alt_mac(np, index,
- addr->da_addr);
- if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "adding alt mac %d\n",
- dev->name, err, index);
- err = niu_enable_alt_mac(np, index, 1);
- if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "enabling alt mac %d\n",
- dev->name, err, index);
-
- index++;
- }
- } else {
- for (i = 0; i < niu_num_alt_addr(np); i++) {
- err = niu_enable_alt_mac(np, i, 0);
- if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "disabling alt mac %d\n",
- dev->name, err, i);
- }
- }
- if (dev->flags & IFF_ALLMULTI) {
- for (i = 0; i < 16; i++)
- hash[i] = 0xffff;
- } else if (dev->mc_count > 0) {
- for (addr = dev->mc_list; addr; addr = addr->next) {
- u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
-
- crc >>= 24;
- hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
- }
- }
-
- if (np->flags & NIU_FLAGS_MCAST)
- niu_load_hash(np, hash);
-
- niu_enable_rx_mac(np, 1);
- spin_unlock_irqrestore(&np->lock, flags);
+ np->msg_enable = value;
}
-static int niu_set_mac_addr(struct net_device *dev, void *p)
+static int niu_nway_reset(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- struct sockaddr *addr = p;
- unsigned long flags;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-
- if (!netif_running(dev))
- return 0;
- spin_lock_irqsave(&np->lock, flags);
- niu_enable_rx_mac(np, 0);
- niu_set_primary_mac(np, dev->dev_addr);
- niu_enable_rx_mac(np, 1);
- spin_unlock_irqrestore(&np->lock, flags);
+ if (np->link_config.autoneg)
+ return niu_init_link(np);
return 0;
}
-static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int niu_get_eeprom_len(struct net_device *dev)
{
- return -EOPNOTSUPP;
+ struct niu *np = netdev_priv(dev);
+
+ return np->eeprom_len;
}
-static void niu_netif_stop(struct niu *np)
+static int niu_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
{
- np->dev->trans_start = jiffies; /* prevent tx timeout */
+ struct niu *np = netdev_priv(dev);
+ u32 offset, len, val;
- niu_disable_napi(np);
+ offset = eeprom->offset;
+ len = eeprom->len;
- netif_tx_disable(np->dev);
-}
+ if (offset + len < offset)
+ return -EINVAL;
+ if (offset >= np->eeprom_len)
+ return -EINVAL;
+ if (offset + len > np->eeprom_len)
+ len = eeprom->len = np->eeprom_len - offset;
-static void niu_netif_start(struct niu *np)
-{
- /* NOTE: unconditional netif_wake_queue is only appropriate
- * so long as all callers are assured to have free tx slots
- * (such as after niu_init_hw).
- */
- netif_wake_queue(np->dev);
+ if (offset & 3) {
+ u32 b_offset, b_count;
- niu_enable_napi(np);
+ b_offset = offset & 3;
+ b_count = 4 - b_offset;
+ if (b_count > len)
+ b_count = len;
- niu_enable_interrupts(np, 1);
+ val = nr64(ESPC_NCR((offset - b_offset) / 4));
+ memcpy(data, ((char *)&val) + b_offset, b_count);
+ data += b_count;
+ len -= b_count;
+ offset += b_count;
+ }
+ while (len >= 4) {
+ val = nr64(ESPC_NCR(offset / 4));
+ memcpy(data, &val, 4);
+ data += 4;
+ len -= 4;
+ offset += 4;
+ }
+ if (len) {
+ val = nr64(ESPC_NCR(offset / 4));
+ memcpy(data, &val, len);
+ }
+ return 0;
}
-static void niu_reset_task(struct work_struct *work)
+static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
{
- struct niu *np = container_of(work, struct niu, reset_task);
- unsigned long flags;
- int err;
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ *pid = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ *pid = IPPROTO_UDP;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ *pid = IPPROTO_SCTP;
+ break;
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ *pid = IPPROTO_AH;
+ break;
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ *pid = IPPROTO_ESP;
+ break;
+ default:
+ *pid = 0;
+ break;
+ }
+}
- spin_lock_irqsave(&np->lock, flags);
- if (!netif_running(np->dev)) {
- spin_unlock_irqrestore(&np->lock, flags);
- return;
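+/* Map a NIU class code to an ethtool flow type; returns 1 on success, 0 if there is no mapping. */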
+static int niu_class_to_ethflow(u64 class, int *flow_type)
+{
+ switch (class) {
+ case CLASS_CODE_TCP_IPV4:
+ *flow_type = TCP_V4_FLOW;
+ break;
+ case CLASS_CODE_UDP_IPV4:
+ *flow_type = UDP_V4_FLOW;
+ break;
+ case CLASS_CODE_AH_ESP_IPV4:
+ *flow_type = AH_V4_FLOW;
+ break;
+ case CLASS_CODE_SCTP_IPV4:
+ *flow_type = SCTP_V4_FLOW;
+ break;
+ case CLASS_CODE_TCP_IPV6:
+ *flow_type = TCP_V6_FLOW;
+ break;
+ case CLASS_CODE_UDP_IPV6:
+ *flow_type = UDP_V6_FLOW;
+ break;
+ case CLASS_CODE_AH_ESP_IPV6:
+ *flow_type = AH_V6_FLOW;
+ break;
+ case CLASS_CODE_SCTP_IPV6:
+ *flow_type = SCTP_V6_FLOW;
+ break;
+ case CLASS_CODE_USER_PROG1:
+ case CLASS_CODE_USER_PROG2:
+ case CLASS_CODE_USER_PROG3:
+ case CLASS_CODE_USER_PROG4:
+ *flow_type = IP_USER_FLOW;
+ break;
+ default:
+ return 0;
}
- spin_unlock_irqrestore(&np->lock, flags);
+ return 1;
+}
- del_timer_sync(&np->timer);
+static int niu_ethflow_to_class(int flow_type, u64 *class)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ *class = CLASS_CODE_TCP_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ *class = CLASS_CODE_UDP_IPV4;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ *class = CLASS_CODE_AH_ESP_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ *class = CLASS_CODE_SCTP_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ *class = CLASS_CODE_TCP_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ *class = CLASS_CODE_UDP_IPV6;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ *class = CLASS_CODE_AH_ESP_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ *class = CLASS_CODE_SCTP_IPV6;
+ break;
+ default:
+ return 0;
+ }
- niu_netif_stop(np);
+ return 1;
+}
- spin_lock_irqsave(&np->lock, flags);
+static u64 niu_flowkey_to_ethflow(u64 flow_key)
+{
+ u64 ethflow = 0;
- niu_stop_hw(np);
+ if (flow_key & FLOW_KEY_L2DA)
+ ethflow |= RXH_L2DA;
+ if (flow_key & FLOW_KEY_VLAN)
+ ethflow |= RXH_VLAN;
+ if (flow_key & FLOW_KEY_IPSA)
+ ethflow |= RXH_IP_SRC;
+ if (flow_key & FLOW_KEY_IPDA)
+ ethflow |= RXH_IP_DST;
+ if (flow_key & FLOW_KEY_PROTO)
+ ethflow |= RXH_L3_PROTO;
+ if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
+ ethflow |= RXH_L4_B_0_1;
+ if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
+ ethflow |= RXH_L4_B_2_3;
- err = niu_init_hw(np);
- if (!err) {
- np->timer.expires = jiffies + HZ;
- add_timer(&np->timer);
- niu_netif_start(np);
- }
+ return ethflow;
- spin_unlock_irqrestore(&np->lock, flags);
}
-static void niu_tx_timeout(struct net_device *dev)
+static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
{
- struct niu *np = netdev_priv(dev);
+ u64 key = 0;
- dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
- dev->name);
+ if (ethflow & RXH_L2DA)
+ key |= FLOW_KEY_L2DA;
+ if (ethflow & RXH_VLAN)
+ key |= FLOW_KEY_VLAN;
+ if (ethflow & RXH_IP_SRC)
+ key |= FLOW_KEY_IPSA;
+ if (ethflow & RXH_IP_DST)
+ key |= FLOW_KEY_IPDA;
+ if (ethflow & RXH_L3_PROTO)
+ key |= FLOW_KEY_PROTO;
+ if (ethflow & RXH_L4_B_0_1)
+ key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
+ if (ethflow & RXH_L4_B_2_3)
+ key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
- schedule_work(&np->reset_task);
-}
+ *flow_key = key;
-static void niu_set_txd(struct tx_ring_info *rp, int index,
- u64 mapping, u64 len, u64 mark,
- u64 n_frags)
-{
- __le64 *desc = &rp->descr[index];
+ return 1;
- *desc = cpu_to_le64(mark |
- (n_frags << TX_DESC_NUM_PTR_SHIFT) |
- (len << TX_DESC_TR_LEN_SHIFT) |
- (mapping & TX_DESC_SAD));
}
-static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
- u64 pad_bytes, u64 len)
+static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
- u16 eth_proto, eth_proto_inner;
- u64 csum_bits, l3off, ihl, ret;
- u8 ip_proto;
- int ipv6;
+ u64 class;
- eth_proto = be16_to_cpu(ehdr->h_proto);
- eth_proto_inner = eth_proto;
- if (eth_proto == ETH_P_8021Q) {
- struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
- __be16 val = vp->h_vlan_encapsulated_proto;
+ nfc->data = 0;
- eth_proto_inner = be16_to_cpu(val);
- }
+ if (!niu_ethflow_to_class(nfc->flow_type, &class))
+ return -EINVAL;
- ipv6 = ihl = 0;
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- ip_proto = ip_hdr(skb)->protocol;
- ihl = ip_hdr(skb)->ihl;
+ if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+ TCAM_KEY_DISC)
+ nfc->data = RXH_DISCARD;
+ else
+ nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
+ CLASS_CODE_USER_PROG1]);
+ return 0;
+}
+
+static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >>
+ TCAM_V4KEY3_SADDR_SHIFT;
+ fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >>
+ TCAM_V4KEY3_DADDR_SHIFT;
+ fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >>
+ TCAM_V4KEY3_SADDR_SHIFT;
+ fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >>
+ TCAM_V4KEY3_DADDR_SHIFT;
+
+ fsp->h_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src);
+ fsp->m_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src);
+ fsp->h_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst);
+ fsp->m_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst);
+
+ fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
+ TCAM_V4KEY2_TOS_SHIFT;
+ fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
+ TCAM_V4KEY2_TOS_SHIFT;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.psrc =
+ ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+ fsp->h_u.tcp_ip4_spec.pdst =
+ ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+ fsp->m_u.tcp_ip4_spec.psrc =
+ ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+ fsp->m_u.tcp_ip4_spec.pdst =
+ ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst);
break;
- case __constant_htons(ETH_P_IPV6):
- ip_proto = ipv6_hdr(skb)->nexthdr;
- ihl = (40 >> 2);
- ipv6 = 1;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.spi =
+ (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->m_u.ah_ip4_spec.spi =
+ (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+
+ fsp->h_u.ah_ip4_spec.spi =
+ cpu_to_be32(fsp->h_u.ah_ip4_spec.spi);
+ fsp->m_u.ah_ip4_spec.spi =
+ cpu_to_be32(fsp->m_u.ah_ip4_spec.spi);
+ break;
+ case IP_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.l4_4_bytes =
+ (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes =
+ (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+
+ fsp->h_u.usr_ip4_spec.l4_4_bytes =
+ cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes);
+ fsp->m_u.usr_ip4_spec.l4_4_bytes =
+ cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes);
+
+ fsp->h_u.usr_ip4_spec.proto =
+ (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+ fsp->m_u.usr_ip4_spec.proto =
+ (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
break;
default:
- ip_proto = ihl = 0;
break;
}
-
- csum_bits = TXHDR_CSUM_NONE;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u64 start, stuff;
-
- csum_bits = (ip_proto == IPPROTO_TCP ?
- TXHDR_CSUM_TCP :
- (ip_proto == IPPROTO_UDP ?
- TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
-
- start = skb_transport_offset(skb) -
- (pad_bytes + sizeof(struct tx_pkt_hdr));
- stuff = start + skb->csum_offset;
-
- csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
- csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
- }
-
- l3off = skb_network_offset(skb) -
- (pad_bytes + sizeof(struct tx_pkt_hdr));
-
- ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
- (len << TXHDR_LEN_SHIFT) |
- ((l3off / 2) << TXHDR_L3START_SHIFT) |
- (ihl << TXHDR_IHL_SHIFT) |
- ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
- ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
- (ipv6 ? TXHDR_IP_VER : 0) |
- csum_bits);
-
- return ret;
-}
-
-static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb)
-{
- return &np->tx_rings[0];
}
-static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int niu_get_ethtool_tcam_entry(struct niu *np,
+ struct ethtool_rxnfc *nfc)
{
- struct niu *np = netdev_priv(dev);
- unsigned long align, headroom;
- struct tx_ring_info *rp;
- struct tx_pkt_hdr *tp;
- unsigned int len, nfg;
- struct ethhdr *ehdr;
- int prod, i, tlen;
- u64 mapping, mrk;
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ u16 idx;
+ u64 class;
+ int ret = 0;
- rp = tx_ring_select(np, skb);
+ idx = tcam_get_index(np, (u16)nfc->fs.location);
- if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
- netif_stop_queue(dev);
- dev_err(np->device, PFX "%s: BUG! Tx ring full when "
- "queue awake!\n", dev->name);
- rp->tx_errors++;
- return NETDEV_TX_BUSY;
+ tp = &parent->tcam[idx];
+ if (!tp->valid) {
+ pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
+ parent->index, np->dev->name, (u16)nfc->fs.location, idx);
+ return -EINVAL;
}
- if (skb->len < ETH_ZLEN) {
- unsigned int pad_bytes = ETH_ZLEN - skb->len;
+ /* fill the flow spec entry */
+ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+ TCAM_V4KEY0_CLASS_CODE_SHIFT;
+ if (!niu_class_to_ethflow(class, &fsp->flow_type)) {
- if (skb_pad(skb, pad_bytes))
- goto out;
- skb_put(skb, pad_bytes);
+ pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
+ parent->index, np->dev->name);
+ ret = -EINVAL;
+ goto out;
}
- len = sizeof(struct tx_pkt_hdr) + 15;
- if (skb_headroom(skb) < len) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, len);
- if (!skb_new) {
- rp->tx_errors++;
- goto out_drop;
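+ /* AH and ESP share a TCAM class code; use the stored protocol to tell them apart. */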
+ if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
+ u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+ if (proto == IPPROTO_ESP) {
+ if (fsp->flow_type == AH_V4_FLOW)
+ fsp->flow_type = ESP_V4_FLOW;
+ else
+ fsp->flow_type = ESP_V6_FLOW;
}
- kfree_skb(skb);
- skb = skb_new;
- } else
- skb_orphan(skb);
+ }
- align = ((unsigned long) skb->data & (16 - 1));
- headroom = align + sizeof(struct tx_pkt_hdr);
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ niu_get_ip4fs_from_tcam_key(tp, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ /* Not yet implemented */
+ ret = -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ niu_get_ip4fs_from_tcam_key(tp, fsp);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
- ehdr = (struct ethhdr *) skb->data;
- tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
+ if (ret < 0)
+ goto out;
- len = skb->len - sizeof(struct tx_pkt_hdr);
- tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
- tp->resv = 0;
+ if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
+ TCAM_ASSOCDATA_OFFSET_SHIFT;
- len = skb_headlen(skb);
- mapping = np->ops->map_single(np->device, skb->data,
- len, DMA_TO_DEVICE);
+ /* report the tcam size */
+ nfc->data = tcam_get_size(np);
+out:
+ return ret;
+}
- prod = rp->prod;
+static int niu_get_ethtool_tcam_all(struct niu *np,
+ struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ int i, idx, cnt;
+ u16 n_entries;
+ unsigned long flags;
- rp->tx_buffs[prod].skb = skb;
- rp->tx_buffs[prod].mapping = mapping;
- mrk = TX_DESC_SOP;
- if (++rp->mark_counter == rp->mark_freq) {
- rp->mark_counter = 0;
- mrk |= TX_DESC_MARK;
- rp->mark_pending++;
- }
+ /* report the tcam size */
+ nfc->data = tcam_get_size(np);
- tlen = len;
- nfg = skb_shinfo(skb)->nr_frags;
- while (tlen > 0) {
- tlen -= MAX_TX_DESC_LEN;
- nfg++;
+ niu_lock_parent(np, flags);
+ n_entries = nfc->rule_cnt;
+ for (cnt = 0, i = 0; i < nfc->data; i++) {
+ idx = tcam_get_index(np, i);
+ tp = &parent->tcam[idx];
+ if (!tp->valid)
+ continue;
+ rule_locs[cnt] = i;
+ cnt++;
}
+ niu_unlock_parent(np, flags);
- while (len > 0) {
- unsigned int this_len = len;
+ if (n_entries != cnt) {
+ /* warn: the reported rule count should match what we counted */
+ pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
+ "n_entries[%d] != cnt[%d]!!!\n\n",
+ np->parent->index, np->dev->name, n_entries, cnt);
+ }
- if (this_len > MAX_TX_DESC_LEN)
- this_len = MAX_TX_DESC_LEN;
+ return 0;
+}
- niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
- mrk = nfg = 0;
+static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ void *rule_locs)
+{
+ struct niu *np = netdev_priv(dev);
+ int ret = 0;
- prod = NEXT_TX(rp, prod);
- mapping += this_len;
- len -= this_len;
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = niu_get_hash_opts(np, cmd);
+ break;
+ case ETHTOOL_GRXRINGS:
+ cmd->data = np->num_rx_rings;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = niu_get_ethtool_tcam_entry(np, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ return ret;
+}
- len = frag->size;
- mapping = np->ops->map_page(np->device, frag->page,
- frag->page_offset, len,
- DMA_TO_DEVICE);
+static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+{
+ u64 class;
+ u64 flow_key = 0;
+ unsigned long flags;
- rp->tx_buffs[prod].skb = NULL;
- rp->tx_buffs[prod].mapping = mapping;
+ if (!niu_ethflow_to_class(nfc->flow_type, &class))
+ return -EINVAL;
- niu_set_txd(rp, prod, mapping, len, 0, 0);
+ if (class < CLASS_CODE_USER_PROG1 ||
+ class > CLASS_CODE_SCTP_IPV6)
+ return -EINVAL;
- prod = NEXT_TX(rp, prod);
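+ /* RXH_DISCARD maps onto the discard bit in this class's TCAM key. */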
+ if (nfc->data & RXH_DISCARD) {
+ niu_lock_parent(np, flags);
+ flow_key = np->parent->tcam_key[class -
+ CLASS_CODE_USER_PROG1];
+ flow_key |= TCAM_KEY_DISC;
+ nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+ np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+ niu_unlock_parent(np, flags);
+ return 0;
+ } else {
+ /* Discard was set before, but is not set now */
+ if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+ TCAM_KEY_DISC) {
+ niu_lock_parent(np, flags);
+ flow_key = np->parent->tcam_key[class -
+ CLASS_CODE_USER_PROG1];
+ flow_key &= ~TCAM_KEY_DISC;
+ nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
+ flow_key);
+ np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
+ flow_key;
+ niu_unlock_parent(np, flags);
+ }
}
- if (prod < rp->prod)
- rp->wrap_bit ^= TX_RING_KICK_WRAP;
- rp->prod = prod;
+ if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
+ return -EINVAL;
- nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
+ niu_lock_parent(np, flags);
+ nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+ np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+ niu_unlock_parent(np, flags);
- if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
- netif_stop_queue(dev);
- if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
- netif_wake_queue(dev);
- }
+ return 0;
+}
- dev->trans_start = jiffies;
+static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
+ struct niu_tcam_entry *tp,
+ int l2_rdc_tab, u64 class)
+{
+ u8 pid = 0;
+ u32 sip, dip, sipm, dipm, spi, spim;
+ u16 sport, dport, spm, dpm;
+
+ sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
+ sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
+ dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
+ dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
+
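+ /* Key layout: word 0 class code, word 1 L2 RDC table, word 2 TOS/proto/ports, word 3 source and destination addresses. */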
+ tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
+ tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
+ tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
+ tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
+
+ tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
+ tp->key[3] |= dip;
+
+ tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
+ tp->key_mask[3] |= dipm;
+
+ tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
+ TCAM_V4KEY2_TOS_SHIFT);
+ tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
+ TCAM_V4KEY2_TOS_SHIFT);
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+
+ tp->key[2] |= (((u64)sport << 16) | dport);
+ tp->key_mask[2] |= (((u64)spm << 16) | dpm);
+ niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
+ spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
+
+ tp->key[2] |= spi;
+ tp->key_mask[2] |= spim;
+ niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+ break;
+ case IP_USER_FLOW:
+ spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
+ spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
-out:
- return NETDEV_TX_OK;
+ tp->key[2] |= spi;
+ tp->key_mask[2] |= spim;
+ pid = fsp->h_u.usr_ip4_spec.proto;
+ break;
+ default:
+ break;
+ }
-out_drop:
- rp->tx_errors++;
- kfree_skb(skb);
- goto out;
+ tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
+ if (pid)
+ tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
}
-static int niu_change_mtu(struct net_device *dev, int new_mtu)
+static int niu_add_ethtool_tcam_entry(struct niu *np,
+ struct ethtool_rxnfc *nfc)
{
- struct niu *np = netdev_priv(dev);
- int err, orig_jumbo, new_jumbo;
-
- if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
- return -EINVAL;
-
- orig_jumbo = (dev->mtu > ETH_DATA_LEN);
- new_jumbo = (new_mtu > ETH_DATA_LEN);
-
- dev->mtu = new_mtu;
-
- if (!netif_running(dev) ||
- (orig_jumbo == new_jumbo))
- return 0;
-
- niu_full_shutdown(np, dev);
-
- niu_free_channels(np);
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
+ int l2_rdc_table = rdc_table->first_table_num;
+ u16 idx;
+ u64 class;
+ unsigned long flags;
+ int err, ret;
- niu_enable_napi(np);
+ ret = 0;
- err = niu_alloc_channels(np);
- if (err)
- return err;
+ idx = nfc->fs.location;
+ if (idx >= tcam_get_size(np))
+ return -EINVAL;
- spin_lock_irq(&np->lock);
+ if (fsp->flow_type == IP_USER_FLOW) {
+ int i;
+ int add_usr_cls = 0;
+ int ipv6 = 0;
+ struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+
+ niu_lock_parent(np, flags);
+
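+ /* Reuse a user class already programmed for this protocol, otherwise claim the first free slot. */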
+ for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+ if (parent->l3_cls[i]) {
+ if (uspec->proto == parent->l3_cls_pid[i]) {
+ class = parent->l3_cls[i];
+ parent->l3_cls_refcnt[i]++;
+ add_usr_cls = 1;
+ break;
+ }
+ } else {
+ /* Program new user IP class */
+ switch (i) {
+ case 0:
+ class = CLASS_CODE_USER_PROG1;
+ break;
+ case 1:
+ class = CLASS_CODE_USER_PROG2;
+ break;
+ case 2:
+ class = CLASS_CODE_USER_PROG3;
+ break;
+ case 3:
+ class = CLASS_CODE_USER_PROG4;
+ break;
+ default:
+ break;
+ }
+ if (uspec->ip_ver == ETH_RX_NFC_IP6)
+ ipv6 = 1;
+ ret = tcam_user_ip_class_set(np, class, ipv6,
+ uspec->proto,
+ uspec->tos,
+ umask->tos);
+ if (ret)
+ goto out;
+
+ ret = tcam_user_ip_class_enable(np, class, 1);
+ if (ret)
+ goto out;
+ parent->l3_cls[i] = class;
+ parent->l3_cls_pid[i] = uspec->proto;
+ parent->l3_cls_refcnt[i]++;
+ add_usr_cls = 1;
+ break;
+ }
+ }
+ if (!add_usr_cls) {
+ pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
+ "Could not find/insert class for pid %d\n",
+ parent->index, np->dev->name, uspec->proto);
+ ret = -EINVAL;
+ goto out;
+ }
+ niu_unlock_parent(np, flags);
+ } else {
+ if (!niu_ethflow_to_class(fsp->flow_type, &class))
+ return -EINVAL;
+ }
- err = niu_init_hw(np);
- if (!err) {
- init_timer(&np->timer);
- np->timer.expires = jiffies + HZ;
- np->timer.data = (unsigned long) np;
- np->timer.function = niu_timer;
+ niu_lock_parent(np, flags);
- err = niu_enable_interrupts(np, 1);
- if (err)
- niu_stop_hw(np);
- }
+ idx = tcam_get_index(np, idx);
+ tp = &parent->tcam[idx];
- spin_unlock_irq(&np->lock);
+ memset(tp, 0, sizeof(*tp));
- if (!err) {
- netif_start_queue(dev);
- if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
- netif_carrier_on(dev);
+ /* fill in the tcam key and mask */
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ /* Not yet implemented */
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "flow %d for IPv6 not implemented\n\n",
+ parent->index, np->dev->name, fsp->flow_type);
+ ret = -EINVAL;
+ goto out;
+ case IP_USER_FLOW:
+ if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) {
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
+ class);
+ } else {
+ /* Not yet implemented */
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "usr flow for IPv6 not implemented\n\n",
+ parent->index, np->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "Unknown flow type %d\n\n",
+ parent->index, np->dev->name, fsp->flow_type);
+ ret = -EINVAL;
+ goto out;
+ }
- add_timer(&np->timer);
+ /* fill in the assoc data */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ tp->assoc_data = TCAM_ASSOCDATA_DISC;
+ } else {
+ if (fsp->ring_cookie >= np->num_rx_rings) {
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "Invalid RX ring %lld\n\n",
+ parent->index, np->dev->name,
+ (long long) fsp->ring_cookie);
+ ret = -EINVAL;
+ goto out;
+ }
+ tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+ (fsp->ring_cookie <<
+ TCAM_ASSOCDATA_OFFSET_SHIFT));
}
- return err;
-}
+ err = tcam_write(np, idx, tp->key, tp->key_mask);
+ if (err) {
+ ret = -EINVAL;
+ goto out;
+ }
+ err = tcam_assoc_write(np, idx, tp->assoc_data);
+ if (err) {
+ ret = -EINVAL;
+ goto out;
+ }
-static void niu_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct niu *np = netdev_priv(dev);
- struct niu_vpd *vpd = &np->vpd;
+ /* mark the entry valid */
+ tp->valid = 1;
+ np->clas.tcam_valid_entries++;
+out:
+ niu_unlock_parent(np, flags);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- sprintf(info->fw_version, "%d.%d",
- vpd->fcode_major, vpd->fcode_minor);
- if (np->parent->plat_type != PLAT_TYPE_NIU)
- strcpy(info->bus_info, pci_name(np->pdev));
+ return ret;
}
-static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
{
- struct niu *np = netdev_priv(dev);
- struct niu_link_config *lp;
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ u16 idx;
+ unsigned long flags;
+ u64 class;
+ int ret = 0;
- lp = &np->link_config;
+ if (loc >= tcam_get_size(np))
+ return -EINVAL;
- memset(cmd, 0, sizeof(*cmd));
- cmd->phy_address = np->phy_addr;
- cmd->supported = lp->supported;
- cmd->advertising = lp->advertising;
- cmd->autoneg = lp->autoneg;
- cmd->speed = lp->active_speed;
- cmd->duplex = lp->active_duplex;
+ niu_lock_parent(np, flags);
- return 0;
-}
+ idx = tcam_get_index(np, loc);
+ tp = &parent->tcam[idx];
-static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- return -EINVAL;
-}
+ /* If the entry is of a user-defined class, update its refcount */
+ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+ TCAM_V4KEY0_CLASS_CODE_SHIFT;
-static u32 niu_get_msglevel(struct net_device *dev)
-{
- struct niu *np = netdev_priv(dev);
- return np->msg_enable;
-}
+ if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
+ int i;
+ for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+ if (parent->l3_cls[i] == class) {
+ parent->l3_cls_refcnt[i]--;
+ if (!parent->l3_cls_refcnt[i]) {
+ /* disable class */
+ ret = tcam_user_ip_class_enable(np,
+ class,
+ 0);
+ if (ret)
+ goto out;
+ parent->l3_cls[i] = 0;
+ parent->l3_cls_pid[i] = 0;
+ }
+ break;
+ }
+ }
+ if (i == NIU_L3_PROG_CLS) {
+ pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
+ "Usr class 0x%llx not found \n",
+ parent->index, np->dev->name,
+ (unsigned long long) class);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
-static void niu_set_msglevel(struct net_device *dev, u32 value)
-{
- struct niu *np = netdev_priv(dev);
- np->msg_enable = value;
-}
+ ret = tcam_flush(np, idx);
+ if (ret)
+ goto out;
-static int niu_get_eeprom_len(struct net_device *dev)
-{
- struct niu *np = netdev_priv(dev);
+ /* mark the entry invalid */
+ tp->valid = 0;
+ np->clas.tcam_valid_entries--;
+out:
+ niu_unlock_parent(np, flags);
- return np->eeprom_len;
+ return ret;
}
-static int niu_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct niu *np = netdev_priv(dev);
- u32 offset, len, val;
-
- offset = eeprom->offset;
- len = eeprom->len;
-
- if (offset + len < offset)
- return -EINVAL;
- if (offset >= np->eeprom_len)
- return -EINVAL;
- if (offset + len > np->eeprom_len)
- len = eeprom->len = np->eeprom_len - offset;
-
- if (offset & 3) {
- u32 b_offset, b_count;
-
- b_offset = offset & 3;
- b_count = 4 - b_offset;
- if (b_count > len)
- b_count = len;
+ int ret = 0;
- val = nr64(ESPC_NCR((offset - b_offset) / 4));
- memcpy(data, ((char *)&val) + b_offset, b_count);
- data += b_count;
- len -= b_count;
- offset += b_count;
- }
- while (len >= 4) {
- val = nr64(ESPC_NCR(offset / 4));
- memcpy(data, &val, 4);
- data += 4;
- len -= 4;
- offset += 4;
- }
- if (len) {
- val = nr64(ESPC_NCR(offset / 4));
- memcpy(data, &val, len);
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = niu_set_hash_opts(np, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = niu_add_ethtool_tcam_entry(np, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return 0;
+
+ return ret;
}
static const struct {
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
+ niu_sync_rx_discard_stats(np, rp, 0);
+
data[0] = rp->rx_channel;
data[1] = rp->rx_packets;
data[2] = rp->rx_bytes;
.get_link = ethtool_op_get_link,
.get_msglevel = niu_get_msglevel,
.set_msglevel = niu_set_msglevel,
+ .nway_reset = niu_nway_reset,
.get_eeprom_len = niu_get_eeprom_len,
.get_eeprom = niu_get_eeprom,
.get_settings = niu_get_settings,
.get_stats_count = niu_get_stats_count,
.get_ethtool_stats = niu_get_ethtool_stats,
.phys_id = niu_phys_id,
+ .get_rxnfc = niu_get_nfc,
+ .set_rxnfc = niu_set_nfc,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
np->flags |= NIU_FLAGS_10G;
np->flags &= ~NIU_FLAGS_FIBER;
np->mac_xcvr = MAC_XCVR_XPCS;
+ } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
+ /* 10G Serdes or 1G Serdes, default to 10G */
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->flags |= NIU_FLAGS_XCVR_SERDES;
+ np->mac_xcvr = MAC_XCVR_XPCS;
} else {
return -EINVAL;
}
return 0;
}
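+/* Derive the port count from the VPD model string. */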
+static int niu_pci_vpd_get_nports(struct niu *np)
+{
+ int ports = 0;
+
+ if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
+ ports = 4;
+ } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
+ ports = 2;
+ }
+
+ return ports;
+}
+
static void __devinit niu_pci_vpd_validate(struct niu *np)
{
struct net_device *dev = np->dev;
return;
}
- if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
+ if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+ !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->flags |= NIU_FLAGS_XCVR_SERDES;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ if (np->port > 1) {
+ np->flags |= NIU_FLAGS_FIBER;
+ np->flags &= ~NIU_FLAGS_10G;
+ }
+ if (np->flags & NIU_FLAGS_10G)
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
+ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+ NIU_FLAGS_HOTPLUG_PHY);
+ } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
dev_err(np->device, PFX "Illegal phy string [%s].\n",
np->vpd.phy_type);
dev_err(np->device, PFX "Falling back to SPROM.\n");
if (parent->plat_type == PLAT_TYPE_NIU) {
parent->num_ports = 2;
} else {
- parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
- ESPC_NUM_PORTS_MACS_VAL;
-
- if (!parent->num_ports)
- parent->num_ports = 4;
+ parent->num_ports = niu_pci_vpd_get_nports(np);
+ if (!parent->num_ports) {
+ /* Fall back to SPROM as last resort.
+ * This will fail on most cards.
+ */
+ parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
+ ESPC_NUM_PORTS_MACS_VAL;
+
+ /* All of the current probing methods fail on
+ * Maramba on-board parts.
+ */
+ if (!parent->num_ports)
+ parent->num_ports = 4;
+ }
}
}
return 0;
if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
return 0;
} else {
if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
u32 val;
int err;
- err = fill_phy_probe_info(np, parent, info);
- if (err)
- return err;
-
- num_10g = count_10g_ports(info, &lowest_10g);
- num_1g = count_1g_ports(info, &lowest_1g);
-
- switch ((num_10g << 4) | num_1g) {
- case 0x24:
- if (lowest_1g == 10)
- parent->plat_type = PLAT_TYPE_VF_P0;
- else if (lowest_1g == 26)
- parent->plat_type = PLAT_TYPE_VF_P1;
- else
- goto unknown_vg_1g_port;
+ num_10g = num_1g = 0;
- /* fallthru */
- case 0x22:
- val = (phy_encode(PORT_TYPE_10G, 0) |
- phy_encode(PORT_TYPE_10G, 1) |
+ if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+ !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
+ num_10g = 0;
+ num_1g = 2;
+ parent->plat_type = PLAT_TYPE_ATCA_CP3220;
+ parent->num_ports = 4;
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1) |
phy_encode(PORT_TYPE_1G, 2) |
phy_encode(PORT_TYPE_1G, 3));
- break;
-
- case 0x20:
+ } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
+ num_10g = 2;
+ num_1g = 0;
+ parent->num_ports = 2;
val = (phy_encode(PORT_TYPE_10G, 0) |
phy_encode(PORT_TYPE_10G, 1));
- break;
+ } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
+ (parent->plat_type == PLAT_TYPE_NIU)) {
+ /* this is the Monza case */
+ if (np->flags & NIU_FLAGS_10G) {
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1));
+ } else {
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1));
+ }
+ } else {
+ err = fill_phy_probe_info(np, parent, info);
+ if (err)
+ return err;
- case 0x10:
- val = phy_encode(PORT_TYPE_10G, np->port);
- break;
+ num_10g = count_10g_ports(info, &lowest_10g);
+ num_1g = count_1g_ports(info, &lowest_1g);
- case 0x14:
- if (lowest_1g == 10)
- parent->plat_type = PLAT_TYPE_VF_P0;
- else if (lowest_1g == 26)
- parent->plat_type = PLAT_TYPE_VF_P1;
- else
- goto unknown_vg_1g_port;
+ switch ((num_10g << 4) | num_1g) {
+ case 0x24:
+ if (lowest_1g == 10)
+ parent->plat_type = PLAT_TYPE_VF_P0;
+ else if (lowest_1g == 26)
+ parent->plat_type = PLAT_TYPE_VF_P1;
+ else
+ goto unknown_vg_1g_port;
- /* fallthru */
- case 0x13:
- if ((lowest_10g & 0x7) == 0)
+ /* fallthru */
+ case 0x22:
val = (phy_encode(PORT_TYPE_10G, 0) |
- phy_encode(PORT_TYPE_1G, 1) |
- phy_encode(PORT_TYPE_1G, 2) |
- phy_encode(PORT_TYPE_1G, 3));
- else
- val = (phy_encode(PORT_TYPE_1G, 0) |
phy_encode(PORT_TYPE_10G, 1) |
phy_encode(PORT_TYPE_1G, 2) |
phy_encode(PORT_TYPE_1G, 3));
- break;
+ break;
- case 0x04:
- if (lowest_1g == 10)
- parent->plat_type = PLAT_TYPE_VF_P0;
- else if (lowest_1g == 26)
- parent->plat_type = PLAT_TYPE_VF_P1;
- else
- goto unknown_vg_1g_port;
+ case 0x20:
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1));
+ break;
- val = (phy_encode(PORT_TYPE_1G, 0) |
- phy_encode(PORT_TYPE_1G, 1) |
- phy_encode(PORT_TYPE_1G, 2) |
- phy_encode(PORT_TYPE_1G, 3));
- break;
+ case 0x10:
+ val = phy_encode(PORT_TYPE_10G, np->port);
+ break;
- default:
- printk(KERN_ERR PFX "Unsupported port config "
- "10G[%d] 1G[%d]\n",
- num_10g, num_1g);
- return -EINVAL;
+ case 0x14:
+ if (lowest_1g == 10)
+ parent->plat_type = PLAT_TYPE_VF_P0;
+ else if (lowest_1g == 26)
+ parent->plat_type = PLAT_TYPE_VF_P1;
+ else
+ goto unknown_vg_1g_port;
+
+ /* fallthru */
+ case 0x13:
+ if ((lowest_10g & 0x7) == 0)
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_1G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ else
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_10G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ break;
+
+ case 0x04:
+ if (lowest_1g == 10)
+ parent->plat_type = PLAT_TYPE_VF_P0;
+ else if (lowest_1g == 26)
+ parent->plat_type = PLAT_TYPE_VF_P1;
+ else
+ goto unknown_vg_1g_port;
+
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unsupported port config "
+ "10G[%d] 1G[%d]\n",
+ num_10g, num_1g);
+ return -EINVAL;
+ }
}
parent->port_phy = val;
niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
np->parent->tcam_num_entries);
- cp->tcam_index = (u16) np->port;
+ cp->tcam_top = (u16) np->port;
+ cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
cp->h1_init = 0xffffffff;
cp->h2_init = 0xffff;
ADVERTISED_10000baseT_Full |
ADVERTISED_Autoneg);
lp->speed = lp->active_speed = SPEED_INVALID;
- lp->duplex = lp->active_duplex = DUPLEX_INVALID;
+ lp->duplex = DUPLEX_FULL;
+ lp->active_duplex = DUPLEX_INVALID;
+ lp->autoneg = 1;
#if 0
lp->loopback_mode = LOOPBACK_MAC;
lp->active_speed = SPEED_10000;
struct device_node *dp;
const char *phy_type;
const u8 *mac_addr;
+ const char *model;
int prop_len;
if (np->parent->plat_type == PLAT_TYPE_NIU)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ model = of_get_property(dp, "model", &prop_len);
+
+ if (model)
+ strcpy(np->vpd.model, model);
+
+ if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
+ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+ NIU_FLAGS_HOTPLUG_PHY);
+ }
+
return 0;
#else
return -EINVAL;
have_props = !err;
- err = niu_get_and_validate_port(np);
- if (err)
- return err;
-
err = niu_init_mac_ipp_pcs_base(np);
if (err)
return err;
- if (!have_props) {
+ if (have_props) {
+ err = niu_get_and_validate_port(np);
+ if (err)
+ return err;
+
+ } else {
if (np->parent->plat_type == PLAT_TYPE_NIU)
return -EINVAL;
niu_pci_vpd_fetch(np, offset);
nw64(ESPC_PIO_EN, 0);
- if (np->flags & NIU_FLAGS_VPD_VALID)
+ if (np->flags & NIU_FLAGS_VPD_VALID) {
niu_pci_vpd_validate(np);
+ err = niu_get_and_validate_port(np);
+ if (err)
+ return err;
+ }
if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
+ err = niu_get_and_validate_port(np);
+ if (err)
+ return err;
err = niu_pci_probe_sprom(np);
if (err)
return err;
plat_dev = platform_device_register_simple("niu", niu_parent_index,
NULL, 0);
- if (!plat_dev)
+ if (IS_ERR(plat_dev))
return NULL;
for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction)
{
- return dma_unmap_page(dev, dma_address, size, direction);
+ dma_unmap_page(dev, dma_address, size, direction);
}
static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
struct of_device *op, const struct niu_ops *ops,
u8 port)
{
- struct net_device *dev = alloc_etherdev(sizeof(struct niu));
+ struct net_device *dev;
struct niu *np;
+ dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
if (!dev) {
dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
return NULL;
return dev;
}
+static const struct net_device_ops niu_netdev_ops = {
+ .ndo_open = niu_open,
+ .ndo_stop = niu_close,
+ .ndo_start_xmit = niu_start_xmit,
+ .ndo_get_stats = niu_get_stats,
+ .ndo_set_multicast_list = niu_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = niu_set_mac_addr,
+ .ndo_do_ioctl = niu_ioctl,
+ .ndo_tx_timeout = niu_tx_timeout,
+ .ndo_change_mtu = niu_change_mtu,
+};
+
static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
- dev->open = niu_open;
- dev->stop = niu_close;
- dev->get_stats = niu_get_stats;
- dev->set_multicast_list = niu_set_rx_mode;
- dev->set_mac_address = niu_set_mac_addr;
- dev->do_ioctl = niu_ioctl;
- dev->tx_timeout = niu_tx_timeout;
- dev->hard_start_xmit = niu_start_xmit;
+ dev->netdev_ops = &niu_netdev_ops;
dev->ethtool_ops = &niu_ethtool_ops;
dev->watchdog_timeo = NIU_TX_TIMEOUT;
- dev->change_mtu = niu_change_mtu;
}
static void __devinit niu_device_announce(struct niu *np)
{
struct net_device *dev = np->dev;
- int i;
-
- pr_info("%s: NIU Ethernet ", dev->name);
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- i == 5 ? '\n' : ':');
- pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
- dev->name,
- (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
- (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
- (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"),
- (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
- (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
- np->vpd.phy_type);
+ pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
+
+ if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
+ pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+ dev->name,
+ (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+ (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+ (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
+ (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
+ (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
+ np->vpd.phy_type);
+ } else {
+ pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+ dev->name,
+ (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+ (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+ (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
+ (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
+ "COPPER")),
+ (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
+ (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
+ np->vpd.phy_type);
+ }
}
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned long niureg_base, niureg_len;
union niu_parent_id parent_id;
struct net_device *dev;
struct niu *np;
goto err_out_release_parent;
}
}
- if (err || dma_mask == DMA_32BIT_MASK) {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err || dma_mask == DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, PFX "No usable DMA configuration, "
"aborting.\n");
dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
- niureg_base = pci_resource_start(pdev, 0);
- niureg_len = pci_resource_len(pdev, 0);
-
- np->regs = ioremap_nocache(niureg_base, niureg_len);
+ np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
dev_err(&pdev->dev, PFX "Cannot map device registers, "
"aborting.\n");
.unmap_single = niu_phys_unmap_single,
};
-static unsigned long res_size(struct resource *r)
-{
- return r->end - r->start + 1UL;
-}
-
static int __devinit niu_of_probe(struct of_device *op,
const struct of_device_id *match)
{
dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
np->regs = of_ioremap(&op->resource[1], 0,
- res_size(&op->resource[1]),
+ resource_size(&op->resource[1]),
"niu regs");
if (!np->regs) {
dev_err(&op->dev, PFX "Cannot map device registers, "
}
np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
- res_size(&op->resource[2]),
+ resource_size(&op->resource[2]),
"niu vregs-1");
if (!np->vir_regs_1) {
dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
}
np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
- res_size(&op->resource[3]),
+ resource_size(&op->resource[3]),
"niu vregs-2");
if (!np->vir_regs_2) {
dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
err_out_iounmap:
if (np->vir_regs_1) {
of_iounmap(&op->resource[2], np->vir_regs_1,
- res_size(&op->resource[2]));
+ resource_size(&op->resource[2]));
np->vir_regs_1 = NULL;
}
if (np->vir_regs_2) {
of_iounmap(&op->resource[3], np->vir_regs_2,
- res_size(&op->resource[3]));
+ resource_size(&op->resource[3]));
np->vir_regs_2 = NULL;
}
if (np->regs) {
of_iounmap(&op->resource[1], np->regs,
- res_size(&op->resource[1]));
+ resource_size(&op->resource[1]));
np->regs = NULL;
}
if (np->vir_regs_1) {
of_iounmap(&op->resource[2], np->vir_regs_1,
- res_size(&op->resource[2]));
+ resource_size(&op->resource[2]));
np->vir_regs_1 = NULL;
}
if (np->vir_regs_2) {
of_iounmap(&op->resource[3], np->vir_regs_2,
- res_size(&op->resource[3]));
+ resource_size(&op->resource[3]));
np->vir_regs_2 = NULL;
}
if (np->regs) {
of_iounmap(&op->resource[1], np->regs,
- res_size(&op->resource[1]));
+ resource_size(&op->resource[1]));
np->regs = NULL;
}
return 0;
}
-static struct of_device_id niu_match[] = {
+static const struct of_device_id niu_match[] = {
{
.name = "network",
.compatible = "SUNW,niusl",