X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fniu.c;h=0678f3106cbcca5981cd1aa50df08919e04968e2;hb=c2b7aefdc9b10c64a4e4be2303131006f9dc7013;hp=918f802fe089d060247bb21e72b8a651fa0ef333;hpb=f9af857489cc19ee3acd0d5248dca7d243e353a5;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 918f802..0678f31 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -3,6 +3,8 @@ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -22,6 +24,7 @@ #include #include #include +#include #include @@ -32,9 +35,8 @@ #include "niu.h" #define DRV_MODULE_NAME "niu" -#define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.9" -#define DRV_MODULE_RELDATE "May 4, 2008" +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "Nov 14, 2008" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -44,15 +46,10 @@ MODULE_DESCRIPTION("NIU ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -#ifndef DMA_44BIT_MASK -#define DMA_44BIT_MASK 0x00000fffffffffffULL -#endif - #ifndef readq static u64 readq(void __iomem *reg) { - return (((u64)readl(reg + 0x4UL) << 32) | - (u64)readl(reg)); + return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); } static void writeq(u64 val, void __iomem *reg) @@ -62,7 +59,7 @@ static void writeq(u64 val, void __iomem *reg) } #endif -static struct pci_device_id niu_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, {} }; @@ -93,21 +90,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "NIU debug level"); -#define niudbg(TYPE, f, a...) \ -do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_DEBUG PFX f, ## a); \ -} while (0) - -#define niuinfo(TYPE, f, a...) \ -do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_INFO PFX f, ## a); \ -} while (0) - -#define niuwarn(TYPE, f, a...) 
\ -do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_WARNING PFX f, ## a); \ -} while (0) - #define niu_lock_parent(np, flags) \ spin_lock_irqsave(&np->parent->lock, flags) #define niu_unlock_parent(np, flags) \ @@ -139,10 +121,9 @@ static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, nw64_mac(reg, bits); err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); if (err) - dev_err(np->device, PFX "%s: bits (%llx) of register %s " - "would not clear, val[%llx]\n", - np->dev->name, (unsigned long long) bits, reg_name, - (unsigned long long) nr64_mac(reg)); + netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", + (unsigned long long)bits, reg_name, + (unsigned long long)nr64_mac(reg)); return err; } @@ -179,10 +160,9 @@ static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); if (err) - dev_err(np->device, PFX "%s: bits (%llx) of register %s " - "would not clear, val[%llx]\n", - np->dev->name, (unsigned long long) bits, reg_name, - (unsigned long long) nr64_ipp(reg)); + netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", + (unsigned long long)bits, reg_name, + (unsigned long long)nr64_ipp(reg)); return err; } @@ -220,10 +200,9 @@ static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, nw64(reg, bits); err = __niu_wait_bits_clear(np, reg, bits, limit, delay); if (err) - dev_err(np->device, PFX "%s: bits (%llx) of register %s " - "would not clear, val[%llx]\n", - np->dev->name, (unsigned long long) bits, reg_name, - (unsigned long long) nr64(reg)); + netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", + (unsigned long long)bits, reg_name, + (unsigned long long)nr64(reg)); return err; } @@ -407,7 +386,7 @@ static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) } /* Mode is always 10G fiber. 
*/ -static int serdes_init_niu(struct niu *np) +static int serdes_init_niu_10g_fiber(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg; @@ -444,6 +423,218 @@ static int serdes_init_niu(struct niu *np) return 0; } +static int serdes_init_niu_1g_serdes(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 pll_cfg, pll_sts; + int max_retry = 100; + u64 uninitialized_var(sig), mask, val; + u32 tx_cfg, rx_cfg; + unsigned long i; + int err; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | + PLL_TX_CFG_RATE_HALF); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_RATE_HALF); + + if (np->port == 0) + rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize PLL for 1G */ + pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_CFG_L, pll_cfg); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", + np->port, __func__); + return err; + } + + pll_sts = PLL_CFG_ENPLL; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_STS_L, pll_sts); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", + np->port, __func__); + return err; + } + + udelay(200); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + switch (np->port) { + case 0: + val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); + mask = val; + break; + + case 1: + val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); + mask = val; + break; + + default: + return -EINVAL; + } + + while (max_retry--) { + sig = nr64(ESR_INT_SIGNALS); + if ((sig & mask) == val) + break; + + mdelay(500); + } + + if ((sig & mask) != val) { + netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", + np->port, (int)(sig & mask), (int)val); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_niu_10g_serdes(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; + int max_retry = 100; + u64 uninitialized_var(sig), mask, val; + unsigned long i; + int err; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_EQ_LP_ADAPTIVE); + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize PLL for 10G */ + pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", + np->port, __func__); + return err; + } + + pll_sts = PLL_CFG_ENPLL; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_STS_L, pll_sts & 0xffff); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", 
+ np->port, __func__); + return err; + } + + udelay(200); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + /* check if serdes is ready */ + + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return -EINVAL; + } + + while (max_retry--) { + sig = nr64(ESR_INT_SIGNALS); + if ((sig & mask) == val) + break; + + mdelay(500); + } + + if ((sig & mask) != val) { + pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", + np->port, (int)(sig & mask), (int)val); + + /* 10G failed, try initializing at 1G */ + err = serdes_init_niu_1g_serdes(np); + if (!err) { + np->flags &= ~NIU_FLAGS_10G; + np->mac_xcvr = MAC_XCVR_PCS; + } else { + netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", + np->port); + return -ENODEV; + } + } + return 0; +} + static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) { int err; @@ -522,7 +713,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) static int esr_reset(struct niu *np) { - u32 reset; + u32 uninitialized_var(reset); int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, @@ -551,9 +742,8 @@ static int esr_reset(struct niu *np) if (err) return err; if (reset != 0) { - dev_err(np->device, PFX "Port %u ESR_RESET " - "did not clear [%08x]\n", - np->port, reset); + netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", + np->port, reset); return -ENODEV; } @@ -677,8 +867,8 @@ static int serdes_init_10g(struct niu *np) np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 0; } - dev_err(np->device, PFX "Port %u signal bits [%08x] are not " - "[%08x]\n", np->port, (int) (sig & mask), (int) val); + netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", + np->port, (int)(sig & mask), (int)val); return -ENODEV; } if (np->flags & NIU_FLAGS_HOTPLUG_PHY) @@ -826,8 +1016,8 @@ static int serdes_init_1g_serdes(struct niu *np) } if ((sig & mask) != val) { - dev_err(np->device, PFX "Port %u signal bits [%08x] are not " - "[%08x]\n", np->port, (int) (sig & mask), (int) val); + netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", + np->port, (int)(sig & mask), (int)val); return -ENODEV; } @@ -899,6 +1089,130 @@ static int link_status_10g_serdes(struct niu *np, int *link_up_p) return 0; } +static int link_status_mii(struct niu *np, int *link_up_p) +{ + struct niu_link_config *lp = &np->link_config; + int err; + int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; + int supported, advertising, active_speed, active_duplex; + + err = mii_read(np, np->phy_addr, MII_BMCR); + if (unlikely(err < 0)) + return err; + bmcr = err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (unlikely(err < 0)) + return err; + bmsr = err; + + err = mii_read(np, np->phy_addr, MII_ADVERTISE); + if (unlikely(err < 0)) + return err; + advert = err; + + err = mii_read(np, np->phy_addr, MII_LPA); + if (unlikely(err < 0)) + return err; + lpa = err; + + if (likely(bmsr & BMSR_ESTATEN)) { + err = 
mii_read(np, np->phy_addr, MII_ESTATUS); + if (unlikely(err < 0)) + return err; + estatus = err; + + err = mii_read(np, np->phy_addr, MII_CTRL1000); + if (unlikely(err < 0)) + return err; + ctrl1000 = err; + + err = mii_read(np, np->phy_addr, MII_STAT1000); + if (unlikely(err < 0)) + return err; + stat1000 = err; + } else + estatus = ctrl1000 = stat1000 = 0; + + supported = 0; + if (bmsr & BMSR_ANEGCAPABLE) + supported |= SUPPORTED_Autoneg; + if (bmsr & BMSR_10HALF) + supported |= SUPPORTED_10baseT_Half; + if (bmsr & BMSR_10FULL) + supported |= SUPPORTED_10baseT_Full; + if (bmsr & BMSR_100HALF) + supported |= SUPPORTED_100baseT_Half; + if (bmsr & BMSR_100FULL) + supported |= SUPPORTED_100baseT_Full; + if (estatus & ESTATUS_1000_THALF) + supported |= SUPPORTED_1000baseT_Half; + if (estatus & ESTATUS_1000_TFULL) + supported |= SUPPORTED_1000baseT_Full; + lp->supported = supported; + + advertising = 0; + if (advert & ADVERTISE_10HALF) + advertising |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + advertising |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + advertising |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + advertising |= ADVERTISED_100baseT_Full; + if (ctrl1000 & ADVERTISE_1000HALF) + advertising |= ADVERTISED_1000baseT_Half; + if (ctrl1000 & ADVERTISE_1000FULL) + advertising |= ADVERTISED_1000baseT_Full; + + if (bmcr & BMCR_ANENABLE) { + int neg, neg1000; + + lp->active_autoneg = 1; + advertising |= ADVERTISED_Autoneg; + + neg = advert & lpa; + neg1000 = (ctrl1000 << 2) & stat1000; + + if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) + active_speed = SPEED_1000; + else if (neg & LPA_100) + active_speed = SPEED_100; + else if (neg & (LPA_10HALF | LPA_10FULL)) + active_speed = SPEED_10; + else + active_speed = SPEED_INVALID; + + if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) + active_duplex = DUPLEX_FULL; + else if (active_speed != SPEED_INVALID) + active_duplex = DUPLEX_HALF; + else + active_duplex = DUPLEX_INVALID; + } else { + lp->active_autoneg = 0; + + if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) + active_speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + active_speed = SPEED_100; + else + active_speed = SPEED_10; + + if (bmcr & BMCR_FULLDPLX) + active_duplex = DUPLEX_FULL; + else + active_duplex = DUPLEX_HALF; + } + + lp->active_advertising = advertising; + lp->active_speed = active_speed; + lp->active_duplex = active_duplex; + *link_up_p = !!(bmsr & BMSR_LSTATUS); + + return 0; +} + static int link_status_1g_rgmii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; @@ -955,13 +1269,29 @@ out: return err; } +static int link_status_1g(struct niu *np, int *link_up_p) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long flags; + int err; + + spin_lock_irqsave(&np->lock, flags); + + err = link_status_mii(np, link_up_p); + lp->supported |= SUPPORTED_TP; + lp->active_advertising |= ADVERTISED_TP; + + spin_unlock_irqrestore(&np->lock, flags); + return err; +} + static int bcm8704_reset(struct niu *np) { int err, limit; err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_BMCR); - if (err < 0) + if (err < 0 || err == 0xffff) return err; err |= BMCR_RESET; err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, @@ -979,8 +1309,8 @@ static int bcm8704_reset(struct niu *np) break; } if (limit < 0) { - dev_err(np->device, PFX "Port %u PHY will not reset " - "(bmcr=%04x)\n", np->port, (err & 0xffff)); + netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", + 
np->port, (err & 0xffff)); return -ENODEV; } return 0; @@ -1162,21 +1492,18 @@ static int xcvr_diag_bcm870x(struct niu *np) MII_STAT1000); if (err < 0) return err; - pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n", - np->port, err); + pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); if (err < 0) return err; - pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n", - np->port, err); + pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_NWAYTEST); if (err < 0) return err; - pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n", - np->port, err); + pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); #endif /* XXX dig this out it might not be so useful XXX */ @@ -1202,11 +1529,11 @@ static int xcvr_diag_bcm870x(struct niu *np) if (analog_stat0 != 0x03fc) { if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { - pr_info(PFX "Port %u cable not connected " - "or bad cable.\n", np->port); + pr_info("Port %u cable not connected or bad cable\n", + np->port); } else if (analog_stat0 == 0x639c) { - pr_info(PFX "Port %u optical module is bad " - "or missing.\n", np->port); + pr_info("Port %u optical module is bad or missing\n", + np->port); } } @@ -1346,8 +1673,8 @@ static int mii_reset(struct niu *np) break; } if (limit < 0) { - dev_err(np->device, PFX "Port %u MII would not reset, " - "bmcr[%04x]\n", np->port, err); + netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", + np->port, err); return -ENODEV; } @@ -1460,40 +1787,89 @@ static int mii_init_common(struct niu *np) return err; } - /* XXX configurable XXX */ - /* XXX for now don't advertise half-duplex or asym pause... XXX */ - adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; - if (bmsr & BMSR_10FULL) - adv |= ADVERTISE_10FULL; - if (bmsr & BMSR_100FULL) - adv |= ADVERTISE_100FULL; - err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); - if (err) - return err; - - if (bmsr & BMSR_ESTATEN) { - u16 ctrl1000 = 0; - - if (estat & ESTATUS_1000_TFULL) - ctrl1000 |= ADVERTISE_1000FULL; - err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); + if (lp->autoneg) { + u16 ctrl1000; + + adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; + if ((bmsr & BMSR_10HALF) && + (lp->advertising & ADVERTISED_10baseT_Half)) + adv |= ADVERTISE_10HALF; + if ((bmsr & BMSR_10FULL) && + (lp->advertising & ADVERTISED_10baseT_Full)) + adv |= ADVERTISE_10FULL; + if ((bmsr & BMSR_100HALF) && + (lp->advertising & ADVERTISED_100baseT_Half)) + adv |= ADVERTISE_100HALF; + if ((bmsr & BMSR_100FULL) && + (lp->advertising & ADVERTISED_100baseT_Full)) + adv |= ADVERTISE_100FULL; + err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); if (err) return err; + + if (likely(bmsr & BMSR_ESTATEN)) { + ctrl1000 = 0; + if ((estat & ESTATUS_1000_THALF) && + (lp->advertising & ADVERTISED_1000baseT_Half)) + ctrl1000 |= ADVERTISE_1000HALF; + if ((estat & ESTATUS_1000_TFULL) && + (lp->advertising & ADVERTISED_1000baseT_Full)) + ctrl1000 |= ADVERTISE_1000FULL; + err = mii_write(np, np->phy_addr, + MII_CTRL1000, ctrl1000); + if (err) + return err; + } + + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + } else { + /* !lp->autoneg */ + int fulldpx; + + if (lp->duplex == DUPLEX_FULL) { + bmcr |= BMCR_FULLDPLX; + fulldpx = 1; + } else if (lp->duplex == DUPLEX_HALF) + fulldpx = 0; + else + return -EINVAL; + + if (lp->speed == SPEED_1000) { + /* if X-full requested while not supported, or + X-half requested while not supported... 
*/ + if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || + (!fulldpx && !(estat & ESTATUS_1000_THALF))) + return -EINVAL; + bmcr |= BMCR_SPEED1000; + } else if (lp->speed == SPEED_100) { + if ((fulldpx && !(bmsr & BMSR_100FULL)) || + (!fulldpx && !(bmsr & BMSR_100HALF))) + return -EINVAL; + bmcr |= BMCR_SPEED100; + } else if (lp->speed == SPEED_10) { + if ((fulldpx && !(bmsr & BMSR_10FULL)) || + (!fulldpx && !(bmsr & BMSR_10HALF))) + return -EINVAL; + } else + return -EINVAL; } - bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; +#if 0 err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; + bmcr = err; + err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; -#if 0 - pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n", + bmsr = err; + + pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", np->port, bmcr, bmsr); #endif @@ -1546,16 +1922,12 @@ static int niu_link_status_common(struct niu *np, int link_up) unsigned long flags; if (!netif_carrier_ok(dev) && link_up) { - niuinfo(LINK, "%s: Link is up at %s, %s duplex\n", - dev->name, - (lp->active_speed == SPEED_10000 ? - "10Gb/sec" : - (lp->active_speed == SPEED_1000 ? - "1Gb/sec" : - (lp->active_speed == SPEED_100 ? - "100Mbit/sec" : "10Mbit/sec"))), - (lp->active_duplex == DUPLEX_FULL ? - "full" : "half")); + netif_info(np, link, dev, "Link is up at %s, %s duplex\n", + lp->active_speed == SPEED_10000 ? "10Gb/sec" : + lp->active_speed == SPEED_1000 ? "1Gb/sec" : + lp->active_speed == SPEED_100 ? "100Mbit/sec" : + "10Mbit/sec", + lp->active_duplex == DUPLEX_FULL ? "full" : "half"); spin_lock_irqsave(&np->lock, flags); niu_init_xif(np); @@ -1564,7 +1936,7 @@ static int niu_link_status_common(struct niu *np, int link_up) netif_carrier_on(dev); } else if (netif_carrier_ok(dev) && !link_up) { - niuwarn(LINK, "%s: Link is down\n", dev->name); + netif_warn(np, link, dev, "Link is down\n"); spin_lock_irqsave(&np->lock, flags); niu_handle_led(np, 0); spin_unlock_irqrestore(&np->lock, flags); @@ -1637,7 +2009,7 @@ static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, BCM8704_PMD_RCV_SIGDET); - if (err < 0) + if (err < 0 || err == 0xffff) goto out; if (!(err & PMD_RCV_SIGDET_GLOBAL)) { err = 0; @@ -1678,8 +2050,6 @@ static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) out: *link_up_p = link_up; - if (np->flags & NIU_FLAGS_HOTPLUG_PHY) - err = 0; return err; } @@ -1815,108 +2185,42 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p) if (phy_present != phy_present_prev) { /* state change */ if (phy_present) { + /* A NEM was just plugged in */ np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; if (np->phy_ops->xcvr_init) err = np->phy_ops->xcvr_init(np); if (err) { + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err == 0xffff) { + /* No mdio, back-to-back XAUI */ + goto out; + } /* debounce */ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; } } else { np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; *link_up_p = 0; - niuwarn(LINK, "%s: Hotplug PHY Removed\n", - np->dev->name); + netif_warn(np, link, np->dev, + "Hotplug PHY Removed\n"); } } - if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) +out: + if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { err = link_status_10g_bcm8706(np, link_up_p); - } - - spin_unlock_irqrestore(&np->lock, flags); - - return err; -} - -static int link_status_1g(struct niu *np, int *link_up_p) -{ - struct 
niu_link_config *lp = &np->link_config; - u16 current_speed, bmsr; - unsigned long flags; - u8 current_duplex; - int err, link_up; - - link_up = 0; - current_speed = SPEED_INVALID; - current_duplex = DUPLEX_INVALID; - - spin_lock_irqsave(&np->lock, flags); - - err = -EINVAL; - if (np->link_config.loopback_mode != LOOPBACK_DISABLED) - goto out; - - err = mii_read(np, np->phy_addr, MII_BMSR); - if (err < 0) - goto out; - - bmsr = err; - if (bmsr & BMSR_LSTATUS) { - u16 adv, lpa, common, estat; - - err = mii_read(np, np->phy_addr, MII_ADVERTISE); - if (err < 0) - goto out; - adv = err; - - err = mii_read(np, np->phy_addr, MII_LPA); - if (err < 0) - goto out; - lpa = err; - - common = adv & lpa; - - err = mii_read(np, np->phy_addr, MII_ESTATUS); - if (err < 0) - goto out; - estat = err; - - link_up = 1; - if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) { - current_speed = SPEED_1000; - if (estat & ESTATUS_1000_TFULL) - current_duplex = DUPLEX_FULL; - else - current_duplex = DUPLEX_HALF; - } else { - if (common & ADVERTISE_100BASE4) { - current_speed = SPEED_100; - current_duplex = DUPLEX_HALF; - } else if (common & ADVERTISE_100FULL) { - current_speed = SPEED_100; - current_duplex = DUPLEX_FULL; - } else if (common & ADVERTISE_100HALF) { - current_speed = SPEED_100; - current_duplex = DUPLEX_HALF; - } else if (common & ADVERTISE_10FULL) { - current_speed = SPEED_10; - current_duplex = DUPLEX_FULL; - } else if (common & ADVERTISE_10HALF) { - current_speed = SPEED_10; - current_duplex = DUPLEX_HALF; - } else - link_up = 0; + if (err == 0xffff) { + /* No mdio, back-to-back XAUI: it is C10NEM */ + *link_up_p = 1; + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + } } } - lp->active_speed = current_speed; - lp->active_duplex = current_duplex; - err = 0; -out: spin_unlock_irqrestore(&np->lock, flags); - *link_up_p = link_up; - return err; + return 0; } static int niu_link_status(struct niu *np, int *link_up_p) @@ -1955,13 +2259,23 @@ static const struct niu_phy_ops phy_ops_10g_serdes = { .link_status = link_status_10g_serdes, }; +static const struct niu_phy_ops phy_ops_10g_serdes_niu = { + .serdes_init = serdes_init_niu_10g_serdes, + .link_status = link_status_10g_serdes, +}; + +static const struct niu_phy_ops phy_ops_1g_serdes_niu = { + .serdes_init = serdes_init_niu_1g_serdes, + .link_status = link_status_1g_serdes, +}; + static const struct niu_phy_ops phy_ops_1g_rgmii = { .xcvr_init = xcvr_init_1g_rgmii, .link_status = link_status_1g_rgmii, }; static const struct niu_phy_ops phy_ops_10g_fiber_niu = { - .serdes_init = serdes_init_niu, + .serdes_init = serdes_init_niu_10g_fiber, .xcvr_init = xcvr_init_10g, .link_status = link_status_10g, }; @@ -1978,6 +2292,12 @@ static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { .link_status = link_status_10g_hotplug, }; +static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { + .serdes_init = serdes_init_niu_10g_fiber, + .xcvr_init = xcvr_init_10g_bcm8706, + .link_status = link_status_10g_hotplug, +}; + static const struct niu_phy_ops phy_ops_10g_copper = { .serdes_init = serdes_init_10g, .link_status = link_status_10g, /* XXX */ @@ -1999,11 +2319,21 @@ struct niu_phy_template { u32 phy_addr_base; }; -static const struct niu_phy_template phy_template_niu = { +static const struct niu_phy_template phy_template_niu_10g_fiber = { .ops = &phy_ops_10g_fiber_niu, .phy_addr_base = 16, }; +static const struct niu_phy_template phy_template_niu_10g_serdes = { + .ops = &phy_ops_10g_serdes_niu, + 
.phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_niu_1g_serdes = { + .ops = &phy_ops_1g_serdes_niu, + .phy_addr_base = 0, +}; + static const struct niu_phy_template phy_template_10g_fiber = { .ops = &phy_ops_10g_fiber, .phy_addr_base = 8, @@ -2014,6 +2344,11 @@ static const struct niu_phy_template phy_template_10g_fiber_hotplug = { .phy_addr_base = 8, }; +static const struct niu_phy_template phy_template_niu_10g_hotplug = { + .ops = &phy_ops_niu_10g_hotplug, + .phy_addr_base = 8, +}; + static const struct niu_phy_template phy_template_10g_copper = { .ops = &phy_ops_10g_copper, .phy_addr_base = 10, @@ -2048,7 +2383,6 @@ static int serdes_init_10g_serdes(struct niu *np) struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; - int err; u64 reset_val; switch (np->port) { @@ -2101,6 +2435,7 @@ static int serdes_init_10g_serdes(struct niu *np) /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; + int err; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) @@ -2166,8 +2501,8 @@ static int serdes_init_10g_serdes(struct niu *np) np->flags &= ~NIU_FLAGS_10G; np->mac_xcvr = MAC_XCVR_PCS; } else { - dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n", - np->port); + netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", + np->port); return -ENODEV; } } @@ -2183,8 +2518,33 @@ static int niu_determine_phy_disposition(struct niu *np) u32 phy_addr_off = 0; if (plat_type == PLAT_TYPE_NIU) { - tp = &phy_template_niu; - phy_addr_off += np->port; + switch (np->flags & + (NIU_FLAGS_10G | + NIU_FLAGS_FIBER | + NIU_FLAGS_XCVR_SERDES)) { + case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: + /* 10G Serdes */ + tp = &phy_template_niu_10g_serdes; + break; + case NIU_FLAGS_XCVR_SERDES: + /* 1G Serdes */ + tp = &phy_template_niu_1g_serdes; + break; + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + /* 10G Fiber */ + default: + if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { + tp = &phy_template_niu_10g_hotplug; + if (np->port == 0) + phy_addr_off = 8; + if (np->port == 1) + phy_addr_off = 12; + } else { + tp = &phy_template_niu_10g_fiber; + phy_addr_off += np->port; + } + break; + } } else { switch (np->flags & (NIU_FLAGS_10G | @@ -2203,7 +2563,7 @@ static int niu_determine_phy_disposition(struct niu *np) case NIU_FLAGS_10G: /* 10G copper */ - tp = &phy_template_1g_copper; + tp = &phy_template_10g_copper; break; case NIU_FLAGS_FIBER: @@ -2269,11 +2629,11 @@ static int niu_init_link(struct niu *np) msleep(200); } err = niu_serdes_init(np); - if (err) + if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) return err; msleep(200); err = niu_xcvr_init(np); - if (!err) + if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) niu_link_status(np, &ignore); return 0; } @@ -2454,7 +2814,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit) break; udelay(1); } - if (limit < 0) + if (limit <= 0) return -ENODEV; return 0; @@ -2620,7 +2980,6 @@ static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, return 0; } -#if 0 static int tcam_user_ip_class_set(struct niu *np, unsigned long class, int ipv6, u64 protocol_id, u64 tos_mask, u64 tos_val) @@ -2648,7 +3007,6 @@ static int tcam_user_ip_class_set(struct niu *np, unsigned long class, return 0; } -#endif static int tcam_early_init(struct niu *np) { @@ -2846,23 +3204,22 @@ static int fflp_early_init(struct niu *np) parent = np->parent; err = 0; if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { - niudbg(PROBE, "fflp_early_init: 
Initting hw on port %u\n", - np->port); if (np->parent->plat_type != PLAT_TYPE_NIU) { fflp_reset(np); fflp_set_timings(np); err = fflp_disable_all_partitions(np); if (err) { - niudbg(PROBE, "fflp_disable_all_partitions " - "failed, err=%d\n", err); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "fflp_disable_all_partitions failed, err=%d\n", + err); goto out; } } err = tcam_early_init(np); if (err) { - niudbg(PROBE, "tcam_early_init failed, err=%d\n", - err); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "tcam_early_init failed, err=%d\n", err); goto out; } fflp_llcsnap_enable(np, 1); @@ -2872,22 +3229,22 @@ static int fflp_early_init(struct niu *np) err = tcam_flush_all(np); if (err) { - niudbg(PROBE, "tcam_flush_all failed, err=%d\n", - err); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "tcam_flush_all failed, err=%d\n", err); goto out; } if (np->parent->plat_type != PLAT_TYPE_NIU) { err = fflp_hash_clear(np); if (err) { - niudbg(PROBE, "fflp_hash_clear failed, " - "err=%d\n", err); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "fflp_hash_clear failed, err=%d\n", + err); goto out; } } vlan_tbl_clear(np); - niudbg(PROBE, "fflp_early_init: Success\n"); parent->flags |= PARENT_FLGS_CLS_HWINIT; } out: @@ -2915,6 +3272,27 @@ static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) return 0; } +/* Entries for the ports are interleaved in the TCAM */ +static u16 tcam_get_index(struct niu *np, u16 idx) +{ + /* One entry reserved for IP fragment rule */ + if (idx >= (np->clas.tcam_sz - 1)) + idx = 0; + return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports)); +} + +static u16 tcam_get_size(struct niu *np) +{ + /* One entry reserved for IP fragment rule */ + return np->clas.tcam_sz - 1; +} + +static u16 tcam_get_valid_entry_cnt(struct niu *np) +{ + /* One entry reserved for IP fragment rule */ + return np->clas.tcam_valid_entries - 1; +} + static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, u32 offset, u32 size) { @@ -3061,7 +3439,8 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) return num_rcr; } -static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) +static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, + struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; struct sk_buff *skb; @@ -3131,15 +3510,14 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) rp->rcr_index = index; skb_reserve(skb, NET_IP_ALIGN); - __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); + __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN)); rp->rx_packets++; rp->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, np->dev); - netif_receive_skb(skb); - - np->dev->last_rx = jiffies; + skb_record_rx_queue(skb, rp->rx_channel); + napi_gro_receive(napi, skb); return num_rcr; } @@ -3236,10 +3614,14 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) { + struct netdev_queue *txq; u16 pkt_cnt, tmp; - int cons; + int cons, index; u64 cs; + index = (rp - np->tx_rings); + txq = netdev_get_tx_queue(np->dev, index); + cs = rp->tx_cs; if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) goto out; @@ -3252,8 +3634,8 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) cons = rp->cons; - niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", - np->dev->name, pkt_cnt, cons); + netif_printk(np, tx_done, KERN_DEBUG, np->dev, + "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); while 
(pkt_cnt--) cons = release_tx_packet(np, rp, cons); @@ -3262,17 +3644,70 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) smp_mb(); out: - if (unlikely(netif_queue_stopped(np->dev) && + if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { - netif_tx_lock(np->dev); - if (netif_queue_stopped(np->dev) && + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) - netif_wake_queue(np->dev); - netif_tx_unlock(np->dev); + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); } } -static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) +static inline void niu_sync_rx_discard_stats(struct niu *np, + struct rx_ring_info *rp, + const int limit) +{ + /* This elaborate scheme is needed for reading the RX discard + * counters, as they are only 16-bit and can overflow quickly, + * and because the overflow indication bit is not usable as + * the counter value does not wrap, but remains at max value + * 0xFFFF. + * + * In theory and in practice counters can be lost in between + * reading nr64() and clearing the counter nw64(). For this + * reason, the number of counter clearings nw64() is + * limited/reduced though the limit parameter. + */ + int rx_channel = rp->rx_channel; + u32 misc, wred; + + /* RXMISC (Receive Miscellaneous Discard Count), covers the + * following discard events: IPP (Input Port Process), + * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive + * Block Ring) prefetch buffer is empty. + */ + misc = nr64(RXMISC(rx_channel)); + if (unlikely((misc & RXMISC_COUNT) > limit)) { + nw64(RXMISC(rx_channel), 0); + rp->rx_errors += misc & RXMISC_COUNT; + + if (unlikely(misc & RXMISC_OFLOW)) + dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", + rx_channel); + + netif_printk(np, rx_err, KERN_DEBUG, np->dev, + "rx-%d: MISC drop=%u over=%u\n", + rx_channel, misc, misc-limit); + } + + /* WRED (Weighted Random Early Discard) by hardware */ + wred = nr64(RED_DIS_CNT(rx_channel)); + if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { + nw64(RED_DIS_CNT(rx_channel), 0); + rp->rx_dropped += wred & RED_DIS_CNT_COUNT; + + if (unlikely(wred & RED_DIS_CNT_OFLOW)) + dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); + + netif_printk(np, rx_err, KERN_DEBUG, np->dev, + "rx-%d: WRED drop=%u over=%u\n", + rx_channel, wred, wred-limit); + } +} + +static int niu_rx_work(struct napi_struct *napi, struct niu *np, + struct rx_ring_info *rp, int budget) { int qlen, rcr_done = 0, work_done = 0; struct rxdma_mailbox *mbox = rp->mbox; @@ -3288,13 +3723,14 @@ static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) mbox->rx_dma_ctl_stat = 0; mbox->rcrstat_a = 0; - niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", - np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); + netif_printk(np, rx_status, KERN_DEBUG, np->dev, + "%s(chan[%d]), stat[%llx] qlen=%d\n", + __func__, rp->rx_channel, (unsigned long long)stat, qlen); rcr_done = work_done = 0; qlen = min(qlen, budget); while (work_done < qlen) { - rcr_done += niu_process_rx_pkt(np, rp); + rcr_done += niu_process_rx_pkt(napi, np, rp); work_done++; } @@ -3312,6 +3748,10 @@ static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); + /* Only sync discards stats when qlen indicate potential for drops */ + if (qlen > 10) + niu_sync_rx_discard_stats(np, rp, 0x7FFF); + return 
work_done; } @@ -3322,8 +3762,8 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) u32 rx_vec = (v0 & 0xffffffff); int i, work_done = 0; - niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", - np->dev->name, (unsigned long long) v0); + netif_printk(np, intr, KERN_DEBUG, np->dev, + "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; @@ -3338,7 +3778,7 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) if (rx_vec & (1 << rp->rx_channel)) { int this_work_done; - this_work_done = niu_rx_work(np, rp, + this_work_done = niu_rx_work(&lp->napi, np, rp, budget); budget -= this_work_done; @@ -3359,7 +3799,7 @@ static int niu_poll(struct napi_struct *napi, int budget) work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { - netif_rx_complete(np->dev, napi); + napi_complete(napi); niu_ldg_rearm(np, lp, 1); } return work_done; @@ -3368,39 +3808,38 @@ static int niu_poll(struct napi_struct *napi, int budget) static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, u64 stat) { - dev_err(np->device, PFX "%s: RX channel %u errors ( ", - np->dev->name, rp->rx_channel); + netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) - printk("RBR_TMOUT "); + pr_cont("RBR_TMOUT "); if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) - printk("RSP_CNT "); + pr_cont("RSP_CNT "); if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) - printk("BYTE_EN_BUS "); + pr_cont("BYTE_EN_BUS "); if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) - printk("RSP_DAT "); + pr_cont("RSP_DAT "); if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) - printk("RCR_ACK "); + pr_cont("RCR_ACK "); if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) - printk("RCR_SHA_PAR "); + pr_cont("RCR_SHA_PAR "); if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) - printk("RBR_PRE_PAR "); + pr_cont("RBR_PRE_PAR "); if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) - printk("CONFIG "); + pr_cont("CONFIG "); if (stat & RX_DMA_CTL_STAT_RCRINCON) - printk("RCRINCON "); + pr_cont("RCRINCON "); if (stat & RX_DMA_CTL_STAT_RCRFULL) - printk("RCRFULL "); + pr_cont("RCRFULL "); if (stat & RX_DMA_CTL_STAT_RBRFULL) - printk("RBRFULL "); + pr_cont("RBRFULL "); if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) - printk("RBRLOGPAGE "); + pr_cont("RBRLOGPAGE "); if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) - printk("CFIGLOGPAGE "); + pr_cont("CFIGLOGPAGE "); if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) - printk("DC_FIDO "); + pr_cont("DC_FIDO "); - printk(")\n"); + pr_cont(")\n"); } static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) @@ -3414,9 +3853,9 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) err = -EINVAL; if (err) { - dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", - np->dev->name, rp->rx_channel, - (unsigned long long) stat); + netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", + rp->rx_channel, + (unsigned long long) stat); niu_log_rxchan_errors(np, rp, stat); } @@ -3430,27 +3869,26 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, u64 cs) { - dev_err(np->device, PFX "%s: TX channel %u errors ( ", - np->dev->name, rp->tx_channel); + netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); if (cs & TX_CS_MBOX_ERR) - printk("MBOX "); + pr_cont("MBOX "); if (cs & TX_CS_PKT_SIZE_ERR) - printk("PKT_SIZE "); + pr_cont("PKT_SIZE "); if (cs & TX_CS_TX_RING_OFLOW) - printk("TX_RING_OFLOW "); + 
pr_cont("TX_RING_OFLOW "); if (cs & TX_CS_PREF_BUF_PAR_ERR) - printk("PREF_BUF_PAR "); + pr_cont("PREF_BUF_PAR "); if (cs & TX_CS_NACK_PREF) - printk("NACK_PREF "); + pr_cont("NACK_PREF "); if (cs & TX_CS_NACK_PKT_RD) - printk("NACK_PKT_RD "); + pr_cont("NACK_PKT_RD "); if (cs & TX_CS_CONF_PART_ERR) - printk("CONF_PART "); + pr_cont("CONF_PART "); if (cs & TX_CS_PKT_PRT_ERR) - printk("PKT_PTR "); + pr_cont("PKT_PTR "); - printk(")\n"); + pr_cont(")\n"); } static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) @@ -3461,12 +3899,11 @@ static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); - dev_err(np->device, PFX "%s: TX channel %u error, " - "cs[%llx] logh[%llx] logl[%llx]\n", - np->dev->name, rp->tx_channel, - (unsigned long long) cs, - (unsigned long long) logh, - (unsigned long long) logl); + netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", + rp->tx_channel, + (unsigned long long)cs, + (unsigned long long)logh, + (unsigned long long)logl); niu_log_txchan_errors(np, rp, cs); @@ -3485,9 +3922,8 @@ static int niu_mif_interrupt(struct niu *np) phy_mdint = 1; } - dev_err(np->device, PFX "%s: MIF interrupt, " - "stat[%llx] phy_mdint(%d)\n", - np->dev->name, (unsigned long long) mif_status, phy_mdint); + netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", + (unsigned long long)mif_status, phy_mdint); return -ENODEV; } @@ -3542,7 +3978,7 @@ static void niu_xmac_interrupt(struct niu *np) mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; - if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP) + if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) mp->rx_octets += RXMAC_BT_CNT_COUNT; if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; @@ -3612,41 +4048,40 @@ static int niu_mac_interrupt(struct niu *np) static void niu_log_device_error(struct niu *np, u64 stat) { - dev_err(np->device, PFX "%s: Core device errors ( ", - np->dev->name); + netdev_err(np->dev, "Core device errors ( "); if (stat & SYS_ERR_MASK_META2) - printk("META2 "); + pr_cont("META2 "); if (stat & SYS_ERR_MASK_META1) - printk("META1 "); + pr_cont("META1 "); if (stat & SYS_ERR_MASK_PEU) - printk("PEU "); + pr_cont("PEU "); if (stat & SYS_ERR_MASK_TXC) - printk("TXC "); + pr_cont("TXC "); if (stat & SYS_ERR_MASK_RDMC) - printk("RDMC "); + pr_cont("RDMC "); if (stat & SYS_ERR_MASK_TDMC) - printk("TDMC "); + pr_cont("TDMC "); if (stat & SYS_ERR_MASK_ZCP) - printk("ZCP "); + pr_cont("ZCP "); if (stat & SYS_ERR_MASK_FFLP) - printk("FFLP "); + pr_cont("FFLP "); if (stat & SYS_ERR_MASK_IPP) - printk("IPP "); + pr_cont("IPP "); if (stat & SYS_ERR_MASK_MAC) - printk("MAC "); + pr_cont("MAC "); if (stat & SYS_ERR_MASK_SMX) - printk("SMX "); + pr_cont("SMX "); - printk(")\n"); + pr_cont(")\n"); } static int niu_device_error(struct niu *np) { u64 stat = nr64(SYS_ERR_STAT); - dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", - np->dev->name, (unsigned long long) stat); + netdev_err(np->dev, "Core device error, stat[%llx]\n", + (unsigned long long)stat); niu_log_device_error(np, stat); @@ -3728,8 +4163,8 @@ static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, RX_DMA_CTL_STAT_RCRTO); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); - niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", - np->dev->name, (unsigned long long) stat); + netif_printk(np, intr, KERN_DEBUG, np->dev, + "%s() 
stat[%llx]\n", __func__, (unsigned long long)stat); } static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, @@ -3737,8 +4172,8 @@ static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, { rp->tx_cs = nr64(TX_CS(rp->tx_channel)); - niudbg(INTR, "%s: txchan_intr cs[%llx]\n", - np->dev->name, (unsigned long long) rp->tx_cs); + netif_printk(np, intr, KERN_DEBUG, np->dev, + "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); } static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) @@ -3778,12 +4213,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { - if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) { + if (likely(napi_schedule_prep(&lp->napi))) { lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; __niu_fastpath_interrupt(np, lp->ldg_num, v0); - __netif_rx_schedule(np->dev, &lp->napi); + __napi_schedule(&lp->napi); } } @@ -3796,8 +4231,8 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id) u64 v0, v1, v2; if (netif_msg_intr(np)) - printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", - lp, ldg); + printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", + __func__, lp, ldg); spin_lock_irqsave(&np->lock, flags); @@ -3806,7 +4241,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id) v2 = nr64(LDSV2(ldg)); if (netif_msg_intr(np)) - printk("v0[%llx] v1[%llx] v2[%llx]\n", + pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", (unsigned long long) v0, (unsigned long long) v1, (unsigned long long) v2); @@ -3931,8 +4366,8 @@ static int niu_alloc_rx_ring_info(struct niu *np, if (!rp->mbox) return -ENOMEM; if ((unsigned long)rp->mbox & (64UL - 1)) { - dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " - "RXDMA mailbox %p\n", np->dev->name, rp->mbox); + netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", + rp->mbox); return -EINVAL; } @@ -3942,8 +4377,8 @@ static int niu_alloc_rx_ring_info(struct niu *np, if (!rp->rcr) return -ENOMEM; if ((unsigned long)rp->rcr & (64UL - 1)) { - dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " - "RXDMA RCR table %p\n", np->dev->name, rp->rcr); + netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", + rp->rcr); return -EINVAL; } rp->rcr_table_size = MAX_RCR_RING_SIZE; @@ -3955,8 +4390,8 @@ static int niu_alloc_rx_ring_info(struct niu *np, if (!rp->rbr) return -ENOMEM; if ((unsigned long)rp->rbr & (64UL - 1)) { - dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " - "RXDMA RBR table %p\n", np->dev->name, rp->rbr); + netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", + rp->rbr); return -EINVAL; } rp->rbr_table_size = MAX_RBR_RING_SIZE; @@ -3989,8 +4424,8 @@ static int niu_alloc_tx_ring_info(struct niu *np, if (!rp->mbox) return -ENOMEM; if ((unsigned long)rp->mbox & (64UL - 1)) { - dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " - "TXDMA mailbox %p\n", np->dev->name, rp->mbox); + netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", + rp->mbox); return -EINVAL; } @@ -4000,8 +4435,8 @@ static int niu_alloc_tx_ring_info(struct niu *np, if (!rp->descr) return -ENOMEM; if ((unsigned long)rp->descr & (64UL - 1)) { - dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " - "TXDMA descr table %p\n", np->dev->name, rp->descr); + netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", + rp->descr); return -EINVAL; } @@ -4061,6 +4496,8 @@ static int 
niu_alloc_channels(struct niu *np) np->num_rx_rings = parent->rxchan_per_port[port]; np->num_tx_rings = parent->txchan_per_port[port]; + np->dev->real_num_tx_queues = np->num_tx_rings; + np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), GFP_KERNEL); err = -ENOMEM; @@ -4255,10 +4692,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | TX_RNG_CFIG_STADDR)) { - dev_err(np->device, PFX "%s: TX ring channel %d " - "DMA addr (%llx) is not aligned.\n", - np->dev->name, channel, - (unsigned long long) rp->descr_dma); + netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", + channel, (unsigned long long)rp->descr_dma); return -EINVAL; } @@ -4275,10 +4710,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { - dev_err(np->device, PFX "%s: TX ring channel %d " - "MBOX addr (%llx) is has illegal bits.\n", - np->dev->name, channel, - (unsigned long long) rp->mbox_dma); + netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", + channel, (unsigned long long)rp->mbox_dma); return -EINVAL; } nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); @@ -4392,6 +4825,7 @@ static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) { u64 val = 0; + *ret = 0; switch (rp->rbr_block_size) { case 4 * 1024: val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); @@ -4578,8 +5012,7 @@ static int niu_set_ip_frag_rule(struct niu *np) struct niu_tcam_entry *tp; int index, err; - /* XXX fix this allocation scheme XXX */ - index = cp->tcam_index; + index = cp->tcam_top; tp = &parent->tcam[index]; /* Note that the noport bit is the same in both ipv4 and @@ -4596,6 +5029,8 @@ static int niu_set_ip_frag_rule(struct niu *np) err = tcam_assoc_write(np, index, tp->assoc_data); if (err) return err; + tp->valid = 1; + cp->tcam_valid_entries++; return 0; } @@ -4673,9 +5108,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data) err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); if (err) { - dev_err(np->device, PFX "%s: ZCP read busy won't clear, " - "ZCP_RAM_ACC[%llx]\n", np->dev->name, - (unsigned long long) nr64(ZCP_RAM_ACC)); + netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", + (unsigned long long)nr64(ZCP_RAM_ACC)); return err; } @@ -4687,9 +5121,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data) err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); if (err) { - dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " - "ZCP_RAM_ACC[%llx]\n", np->dev->name, - (unsigned long long) nr64(ZCP_RAM_ACC)); + netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", + (unsigned long long)nr64(ZCP_RAM_ACC)); return err; } @@ -4899,10 +5332,10 @@ static void niu_init_xif_xmac(struct niu *np) if (np->flags & NIU_FLAGS_10G) { val |= XMAC_CONFIG_MODE_XGMII; } else { - if (lp->active_speed == SPEED_100) - val |= XMAC_CONFIG_MODE_MII; - else + if (lp->active_speed == SPEED_1000) val |= XMAC_CONFIG_MODE_GMII; + else + val |= XMAC_CONFIG_MODE_MII; } nw64_mac(XMAC_CONFIG, val); @@ -5054,8 +5487,7 @@ static int niu_reset_tx_bmac(struct niu *np) udelay(100); } if (limit < 0) { - dev_err(np->device, PFX "Port %u TX BMAC would not reset, " - "BTXMAC_SW_RST[%llx]\n", + dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) 
nr64_mac(BTXMAC_SW_RST)); return -ENODEV; @@ -5138,7 +5570,7 @@ static void niu_init_tx_mac(struct niu *np) /* The XMAC_MIN register only accepts values for TX min which * have the low 3 bits cleared. */ - BUILD_BUG_ON(min & 0x7); + BUG_ON(min & 0x7); if (np->flags & NIU_FLAGS_XMAC) niu_init_tx_xmac(np, min, max); @@ -5156,12 +5588,11 @@ static int niu_reset_rx_xmac(struct niu *np) while (--limit >= 0) { if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST))) - break; + break; udelay(100); } if (limit < 0) { - dev_err(np->device, PFX "Port %u RX XMAC would not reset, " - "XRXMAC_SW_RST[%llx]\n", + dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(XRXMAC_SW_RST)); return -ENODEV; @@ -5182,8 +5613,7 @@ static int niu_reset_rx_bmac(struct niu *np) udelay(100); } if (limit < 0) { - dev_err(np->device, PFX "Port %u RX BMAC would not reset, " - "BRXMAC_SW_RST[%llx]\n", + dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(BRXMAC_SW_RST)); return -ENODEV; @@ -5487,11 +5917,9 @@ static void niu_disable_ipp(struct niu *np) } if (limit < 0 && (rd != 0 && wr != 1)) { - dev_err(np->device, PFX "%s: IPP would not quiesce, " - "rd_ptr[%llx] wr_ptr[%llx]\n", - np->dev->name, - (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR), - (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR)); + netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", + (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR), + (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR)); } val = nr64_ipp(IPP_CFIG); @@ -5508,12 +5936,12 @@ static int niu_init_hw(struct niu *np) { int i, err; - niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); niu_txc_enable_port(np, 1); niu_txc_port_dma_enable(np, 1); niu_txc_set_imask(np, 0); - niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; @@ -5522,27 +5950,27 @@ static int niu_init_hw(struct niu *np) return err; } - niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); err = niu_init_rx_channels(np); if (err) goto out_uninit_tx_channels; - niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); err = niu_init_classifier_hw(np); if (err) goto out_uninit_rx_channels; - niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); err = niu_init_zcp(np); if (err) goto out_uninit_rx_channels; - niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); err = niu_init_ipp(np); if (err) goto out_uninit_rx_channels; - niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); err = niu_init_mac(np); if (err) goto out_uninit_ipp; @@ -5550,16 +5978,16 @@ static int niu_init_hw(struct niu *np) return 0; out_uninit_ipp: - niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); niu_disable_ipp(np); out_uninit_rx_channels: - niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit 
RX channels\n"); niu_stop_rx_channels(np); niu_reset_rx_channels(np); out_uninit_tx_channels: - niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); niu_stop_tx_channels(np); niu_reset_tx_channels(np); @@ -5568,39 +5996,64 @@ out_uninit_tx_channels: static void niu_stop_hw(struct niu *np) { - niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); niu_enable_interrupts(np, 0); - niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); niu_enable_rx_mac(np, 0); - niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); niu_disable_ipp(np); - niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); niu_stop_tx_channels(np); - niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); niu_stop_rx_channels(np); - niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); niu_reset_tx_channels(np); - niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); niu_reset_rx_channels(np); } +static void niu_set_irq_name(struct niu *np) +{ + int port = np->port; + int i, j = 1; + + sprintf(np->irq_name[0], "%s:MAC", np->dev->name); + + if (port == 0) { + sprintf(np->irq_name[1], "%s:MIF", np->dev->name); + sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); + j = 3; + } + + for (i = 0; i < np->num_ldg - j; i++) { + if (i < np->num_rx_rings) + sprintf(np->irq_name[i+j], "%s-rx-%d", + np->dev->name, i); + else if (i < np->num_tx_rings + np->num_rx_rings) + sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, + i - np->num_rx_rings); + } +} + static int niu_request_irq(struct niu *np) { int i, j, err; + niu_set_irq_name(np); + err = 0; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, - np->dev->name, lp); + np->irq_name[i], lp); if (err) goto out_free_irqs; @@ -5686,7 +6139,7 @@ static int niu_open(struct net_device *dev) goto out_free_irq; } - netif_start_queue(dev); + netif_tx_start_all_queues(dev); if (np->link_config.loopback_mode != LOOPBACK_DISABLED) netif_carrier_on(dev); @@ -5710,7 +6163,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev) cancel_work_sync(&np->reset_task); niu_disable_napi(np); - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); del_timer_sync(&np->timer); @@ -5791,15 +6244,17 @@ static void niu_get_rx_stats(struct niu *np) for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; + niu_sync_rx_discard_stats(np, rp, 0); + pkts += rp->rx_packets; bytes += rp->rx_bytes; dropped += rp->rx_dropped; errors += rp->rx_errors; } - np->net_stats.rx_packets = pkts; - np->net_stats.rx_bytes = bytes; - np->net_stats.rx_dropped = dropped; - np->net_stats.rx_errors = errors; + np->dev->stats.rx_packets = pkts; + np->dev->stats.rx_bytes = bytes; + np->dev->stats.rx_dropped = dropped; + np->dev->stats.rx_errors = errors; } static void niu_get_tx_stats(struct niu *np) @@ -5815,9 +6270,9 @@ static void niu_get_tx_stats(struct niu *np) bytes += rp->tx_bytes; errors += rp->tx_errors; } - 
np->net_stats.tx_packets = pkts; - np->net_stats.tx_bytes = bytes; - np->net_stats.tx_errors = errors; + np->dev->stats.tx_packets = pkts; + np->dev->stats.tx_bytes = bytes; + np->dev->stats.tx_errors = errors; } static struct net_device_stats *niu_get_stats(struct net_device *dev) @@ -5827,7 +6282,7 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev) niu_get_rx_stats(np); niu_get_tx_stats(np); - return &np->net_stats; + return &dev->stats; } static void niu_load_hash_xmac(struct niu *np, u16 *hash) @@ -5859,6 +6314,7 @@ static void niu_set_rx_mode(struct net_device *dev) struct niu *np = netdev_priv(dev); int i, alt_cnt, err; struct dev_addr_list *addr; + struct netdev_hw_addr *ha; unsigned long flags; u16 hash[16] = { 0, }; @@ -5868,10 +6324,10 @@ static void niu_set_rx_mode(struct net_device *dev) np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); if (dev->flags & IFF_PROMISC) np->flags |= NIU_FLAGS_PROMISC; - if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) + if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) np->flags |= NIU_FLAGS_MCAST; - alt_cnt = dev->uc_count; + alt_cnt = netdev_uc_count(dev); if (alt_cnt > niu_num_alt_addr(np)) { alt_cnt = 0; np->flags |= NIU_FLAGS_PROMISC; @@ -5880,18 +6336,15 @@ static void niu_set_rx_mode(struct net_device *dev) if (alt_cnt) { int index = 0; - for (addr = dev->uc_list; addr; addr = addr->next) { - err = niu_set_alt_mac(np, index, - addr->da_addr); + netdev_for_each_uc_addr(ha, dev) { + err = niu_set_alt_mac(np, index, ha->addr); if (err) - printk(KERN_WARNING PFX "%s: Error %d " - "adding alt mac %d\n", - dev->name, err, index); + netdev_warn(dev, "Error %d adding alt mac %d\n", + err, index); err = niu_enable_alt_mac(np, index, 1); if (err) - printk(KERN_WARNING PFX "%s: Error %d " - "enabling alt mac %d\n", - dev->name, err, index); + netdev_warn(dev, "Error %d enabling alt mac %d\n", + err, index); index++; } @@ -5904,16 +6357,15 @@ static void niu_set_rx_mode(struct net_device *dev) for (i = alt_start; i < niu_num_alt_addr(np); i++) { err = niu_enable_alt_mac(np, i, 0); if (err) - printk(KERN_WARNING PFX "%s: Error %d " - "disabling alt mac %d\n", - dev->name, err, i); + netdev_warn(dev, "Error %d disabling alt mac %d\n", + err, i); } } if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < 16; i++) hash[i] = 0xffff; - } else if (dev->mc_count > 0) { - for (addr = dev->mc_list; addr; addr = addr->next) { + } else if (!netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(addr, dev) { u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); crc >>= 24; @@ -5971,13 +6423,63 @@ static void niu_netif_start(struct niu *np) * so long as all callers are assured to have free tx slots * (such as after niu_init_hw). 
*/ - netif_wake_queue(np->dev); + netif_tx_wake_all_queues(np->dev); niu_enable_napi(np); niu_enable_interrupts(np, 1); } +static void niu_reset_buffers(struct niu *np) +{ + int i, j, k, err; + + if (np->rx_rings) { + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { + struct page *page; + + page = rp->rxhash[j]; + while (page) { + struct page *next = + (struct page *) page->mapping; + u64 base = page->index; + base = base >> RBR_DESCR_ADDR_SHIFT; + rp->rbr[k++] = cpu_to_le32(base); + page = next; + } + } + for (; k < MAX_RBR_RING_SIZE; k++) { + err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); + if (unlikely(err)) + break; + } + + rp->rbr_index = rp->rbr_table_size - 1; + rp->rcr_index = 0; + rp->rbr_pending = 0; + rp->rbr_refill_pending = 0; + } + } + if (np->tx_rings) { + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + for (j = 0; j < MAX_TX_RING_SIZE; j++) { + if (rp->tx_buffs[j].skb) + (void) release_tx_packet(np, rp, j); + } + + rp->pending = MAX_TX_RING_SIZE; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + } + } +} + static void niu_reset_task(struct work_struct *work) { struct niu *np = container_of(work, struct niu, reset_task); @@ -6000,6 +6502,12 @@ static void niu_reset_task(struct work_struct *work) niu_stop_hw(np); + spin_unlock_irqrestore(&np->lock, flags); + + niu_reset_buffers(np); + + spin_lock_irqsave(&np->lock, flags); + err = niu_init_hw(np); if (!err) { np->timer.expires = jiffies + HZ; @@ -6014,7 +6522,7 @@ static void niu_tx_timeout(struct net_device *dev) { struct niu *np = netdev_priv(dev); - dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", + dev_err(np->device, "%s: Transmit timed out, resetting\n", dev->name); schedule_work(&np->reset_task); @@ -6051,11 +6559,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, ipv6 = ihl = 0; switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): ip_proto = ip_hdr(skb)->protocol; ihl = ip_hdr(skb)->ihl; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; ihl = (40 >> 2); ipv6 = 1; @@ -6097,15 +6605,12 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, return ret; } -static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb) -{ - return &np->tx_rings[0]; -} - -static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t niu_start_xmit(struct sk_buff *skb, + struct net_device *dev) { struct niu *np = netdev_priv(dev); unsigned long align, headroom; + struct netdev_queue *txq; struct tx_ring_info *rp; struct tx_pkt_hdr *tp; unsigned int len, nfg; @@ -6113,12 +6618,13 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) int prod, i, tlen; u64 mapping, mrk; - rp = tx_ring_select(np, skb); + i = skb_get_queue_mapping(skb); + rp = &np->tx_rings[i]; + txq = netdev_get_tx_queue(dev, i); if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { - netif_stop_queue(dev); - dev_err(np->device, PFX "%s: BUG! Tx ring full when " - "queue awake!\n", dev->name); + netif_tx_stop_queue(txq); + dev_err(np->device, "%s: BUG! 
Tx ring full when queue awake!\n", dev->name); rp->tx_errors++; return NETDEV_TX_BUSY; } @@ -6208,181 +6714,978 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) prod = NEXT_TX(rp, prod); } - if (prod < rp->prod) - rp->wrap_bit ^= TX_RING_KICK_WRAP; - rp->prod = prod; + if (prod < rp->prod) + rp->wrap_bit ^= TX_RING_KICK_WRAP; + rp->prod = prod; + + nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); + + if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) + netif_tx_wake_queue(txq); + } + +out: + return NETDEV_TX_OK; + +out_drop: + rp->tx_errors++; + kfree_skb(skb); + goto out; +} + +static int niu_change_mtu(struct net_device *dev, int new_mtu) +{ + struct niu *np = netdev_priv(dev); + int err, orig_jumbo, new_jumbo; + + if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) + return -EINVAL; + + orig_jumbo = (dev->mtu > ETH_DATA_LEN); + new_jumbo = (new_mtu > ETH_DATA_LEN); + + dev->mtu = new_mtu; + + if (!netif_running(dev) || + (orig_jumbo == new_jumbo)) + return 0; + + niu_full_shutdown(np, dev); + + niu_free_channels(np); + + niu_enable_napi(np); + + err = niu_alloc_channels(np); + if (err) + return err; + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (!err) { + netif_tx_start_all_queues(dev); + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + } + + return err; +} + +static void niu_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct niu *np = netdev_priv(dev); + struct niu_vpd *vpd = &np->vpd; + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + sprintf(info->fw_version, "%d.%d", + vpd->fcode_major, vpd->fcode_minor); + if (np->parent->plat_type != PLAT_TYPE_NIU) + strcpy(info->bus_info, pci_name(np->pdev)); +} + +static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct niu *np = netdev_priv(dev); + struct niu_link_config *lp; + + lp = &np->link_config; + + memset(cmd, 0, sizeof(*cmd)); + cmd->phy_address = np->phy_addr; + cmd->supported = lp->supported; + cmd->advertising = lp->active_advertising; + cmd->autoneg = lp->active_autoneg; + cmd->speed = lp->active_speed; + cmd->duplex = lp->active_duplex; + cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; + cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 
+ XCVR_EXTERNAL : XCVR_INTERNAL; + + return 0; +} + +static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct niu *np = netdev_priv(dev); + struct niu_link_config *lp = &np->link_config; + + lp->advertising = cmd->advertising; + lp->speed = cmd->speed; + lp->duplex = cmd->duplex; + lp->autoneg = cmd->autoneg; + return niu_init_link(np); +} + +static u32 niu_get_msglevel(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + return np->msg_enable; +} + +static void niu_set_msglevel(struct net_device *dev, u32 value) +{ + struct niu *np = netdev_priv(dev); + np->msg_enable = value; +} + +static int niu_nway_reset(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + if (np->link_config.autoneg) + return niu_init_link(np); + + return 0; +} + +static int niu_get_eeprom_len(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + return np->eeprom_len; +} + +static int niu_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct niu *np = netdev_priv(dev); + u32 offset, len, val; + + offset = eeprom->offset; + len = eeprom->len; + + if (offset + len < offset) + return -EINVAL; + if (offset >= np->eeprom_len) + return -EINVAL; + if (offset + len > np->eeprom_len) + len = eeprom->len = np->eeprom_len - offset; + + if (offset & 3) { + u32 b_offset, b_count; + + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) + b_count = len; + + val = nr64(ESPC_NCR((offset - b_offset) / 4)); + memcpy(data, ((char *)&val) + b_offset, b_count); + data += b_count; + len -= b_count; + offset += b_count; + } + while (len >= 4) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, 4); + data += 4; + len -= 4; + offset += 4; + } + if (len) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, len); + } + return 0; +} + +static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) +{ + switch (flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + *pid = IPPROTO_TCP; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + *pid = IPPROTO_UDP; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + *pid = IPPROTO_SCTP; + break; + case AH_V4_FLOW: + case AH_V6_FLOW: + *pid = IPPROTO_AH; + break; + case ESP_V4_FLOW: + case ESP_V6_FLOW: + *pid = IPPROTO_ESP; + break; + default: + *pid = 0; + break; + } +} + +static int niu_class_to_ethflow(u64 class, int *flow_type) +{ + switch (class) { + case CLASS_CODE_TCP_IPV4: + *flow_type = TCP_V4_FLOW; + break; + case CLASS_CODE_UDP_IPV4: + *flow_type = UDP_V4_FLOW; + break; + case CLASS_CODE_AH_ESP_IPV4: + *flow_type = AH_V4_FLOW; + break; + case CLASS_CODE_SCTP_IPV4: + *flow_type = SCTP_V4_FLOW; + break; + case CLASS_CODE_TCP_IPV6: + *flow_type = TCP_V6_FLOW; + break; + case CLASS_CODE_UDP_IPV6: + *flow_type = UDP_V6_FLOW; + break; + case CLASS_CODE_AH_ESP_IPV6: + *flow_type = AH_V6_FLOW; + break; + case CLASS_CODE_SCTP_IPV6: + *flow_type = SCTP_V6_FLOW; + break; + case CLASS_CODE_USER_PROG1: + case CLASS_CODE_USER_PROG2: + case CLASS_CODE_USER_PROG3: + case CLASS_CODE_USER_PROG4: + *flow_type = IP_USER_FLOW; + break; + default: + return 0; + } + + return 1; +} + +static int niu_ethflow_to_class(int flow_type, u64 *class) +{ + switch (flow_type) { + case TCP_V4_FLOW: + *class = CLASS_CODE_TCP_IPV4; + break; + case UDP_V4_FLOW: + *class = CLASS_CODE_UDP_IPV4; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + *class = CLASS_CODE_AH_ESP_IPV4; + break; + case SCTP_V4_FLOW: + *class = CLASS_CODE_SCTP_IPV4; + break; + case TCP_V6_FLOW: + *class = CLASS_CODE_TCP_IPV6; + 
break; + case UDP_V6_FLOW: + *class = CLASS_CODE_UDP_IPV6; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + *class = CLASS_CODE_AH_ESP_IPV6; + break; + case SCTP_V6_FLOW: + *class = CLASS_CODE_SCTP_IPV6; + break; + default: + return 0; + } + + return 1; +} + +static u64 niu_flowkey_to_ethflow(u64 flow_key) +{ + u64 ethflow = 0; + + if (flow_key & FLOW_KEY_L2DA) + ethflow |= RXH_L2DA; + if (flow_key & FLOW_KEY_VLAN) + ethflow |= RXH_VLAN; + if (flow_key & FLOW_KEY_IPSA) + ethflow |= RXH_IP_SRC; + if (flow_key & FLOW_KEY_IPDA) + ethflow |= RXH_IP_DST; + if (flow_key & FLOW_KEY_PROTO) + ethflow |= RXH_L3_PROTO; + if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) + ethflow |= RXH_L4_B_0_1; + if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) + ethflow |= RXH_L4_B_2_3; + + return ethflow; + +} + +static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) +{ + u64 key = 0; + + if (ethflow & RXH_L2DA) + key |= FLOW_KEY_L2DA; + if (ethflow & RXH_VLAN) + key |= FLOW_KEY_VLAN; + if (ethflow & RXH_IP_SRC) + key |= FLOW_KEY_IPSA; + if (ethflow & RXH_IP_DST) + key |= FLOW_KEY_IPDA; + if (ethflow & RXH_L3_PROTO) + key |= FLOW_KEY_PROTO; + if (ethflow & RXH_L4_B_0_1) + key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); + if (ethflow & RXH_L4_B_2_3) + key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); + + *flow_key = key; + + return 1; + +} + +static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) +{ + u64 class; + + nfc->data = 0; + + if (!niu_ethflow_to_class(nfc->flow_type, &class)) + return -EINVAL; + + if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & + TCAM_KEY_DISC) + nfc->data = RXH_DISCARD; + else + nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - + CLASS_CODE_USER_PROG1]); + return 0; +} + +static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, + struct ethtool_rx_flow_spec *fsp) +{ + + fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >> + TCAM_V4KEY3_SADDR_SHIFT; + fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >> + TCAM_V4KEY3_DADDR_SHIFT; + fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> + TCAM_V4KEY3_SADDR_SHIFT; + fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> + TCAM_V4KEY3_DADDR_SHIFT; + + fsp->h_u.tcp_ip4_spec.ip4src = + cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src); + fsp->m_u.tcp_ip4_spec.ip4src = + cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src); + fsp->h_u.tcp_ip4_spec.ip4dst = + cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst); + fsp->m_u.tcp_ip4_spec.ip4dst = + cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst); + + fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> + TCAM_V4KEY2_TOS_SHIFT; + fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> + TCAM_V4KEY2_TOS_SHIFT; + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.psrc = + ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; + fsp->h_u.tcp_ip4_spec.pdst = + ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + fsp->m_u.tcp_ip4_spec.psrc = + ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; + fsp->m_u.tcp_ip4_spec.pdst = + ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + + fsp->h_u.tcp_ip4_spec.psrc = + cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc); + fsp->h_u.tcp_ip4_spec.pdst = + cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst); + fsp->m_u.tcp_ip4_spec.psrc = + cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc); + 
fsp->m_u.tcp_ip4_spec.pdst = + cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst); + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fsp->h_u.ah_ip4_spec.spi = + (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->m_u.ah_ip4_spec.spi = + (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + + fsp->h_u.ah_ip4_spec.spi = + cpu_to_be32(fsp->h_u.ah_ip4_spec.spi); + fsp->m_u.ah_ip4_spec.spi = + cpu_to_be32(fsp->m_u.ah_ip4_spec.spi); + break; + case IP_USER_FLOW: + fsp->h_u.usr_ip4_spec.l4_4_bytes = + (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->m_u.usr_ip4_spec.l4_4_bytes = + (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + + fsp->h_u.usr_ip4_spec.l4_4_bytes = + cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes); + fsp->m_u.usr_ip4_spec.l4_4_bytes = + cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes); + + fsp->h_u.usr_ip4_spec.proto = + (tp->key[2] & TCAM_V4KEY2_PROTO) >> + TCAM_V4KEY2_PROTO_SHIFT; + fsp->m_u.usr_ip4_spec.proto = + (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> + TCAM_V4KEY2_PROTO_SHIFT; + + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + break; + default: + break; + } +} + +static int niu_get_ethtool_tcam_entry(struct niu *np, + struct ethtool_rxnfc *nfc) +{ + struct niu_parent *parent = np->parent; + struct niu_tcam_entry *tp; + struct ethtool_rx_flow_spec *fsp = &nfc->fs; + u16 idx; + u64 class; + int ret = 0; + + idx = tcam_get_index(np, (u16)nfc->fs.location); + + tp = &parent->tcam[idx]; + if (!tp->valid) { + netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", + parent->index, (u16)nfc->fs.location, idx); + return -EINVAL; + } + + /* fill the flow spec entry */ + class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> + TCAM_V4KEY0_CLASS_CODE_SHIFT; + ret = niu_class_to_ethflow(class, &fsp->flow_type); + + if (ret < 0) { + netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", + parent->index); + ret = -EINVAL; + goto out; + } + + if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { + u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> + TCAM_V4KEY2_PROTO_SHIFT; + if (proto == IPPROTO_ESP) { + if (fsp->flow_type == AH_V4_FLOW) + fsp->flow_type = ESP_V4_FLOW; + else + fsp->flow_type = ESP_V6_FLOW; + } + } + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + niu_get_ip4fs_from_tcam_key(tp, fsp); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + /* Not yet implemented */ + ret = -EINVAL; + break; + case IP_USER_FLOW: + niu_get_ip4fs_from_tcam_key(tp, fsp); + break; + default: + ret = -EINVAL; + break; + } + + if (ret < 0) + goto out; + + if (tp->assoc_data & TCAM_ASSOCDATA_DISC) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> + TCAM_ASSOCDATA_OFFSET_SHIFT; + + /* put the tcam size here */ + nfc->data = tcam_get_size(np); +out: + return ret; +} + +static int niu_get_ethtool_tcam_all(struct niu *np, + struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + struct niu_parent *parent = np->parent; + struct niu_tcam_entry *tp; + int i, idx, cnt; + u16 n_entries; + unsigned long flags; + + + /* put the tcam size here */ + nfc->data = tcam_get_size(np); + + niu_lock_parent(np, flags); + n_entries = nfc->rule_cnt; + for (cnt = 0, i = 0; i < nfc->data; i++) { + idx = tcam_get_index(np, i); + tp = &parent->tcam[idx]; + if (!tp->valid) + continue; + rule_locs[cnt] = i; + cnt++; + } + 
niu_unlock_parent(np, flags); + + if (n_entries != cnt) { + /* print warning, this should not happen */ + netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n", + np->parent->index, __func__, n_entries, cnt); + } + + return 0; +} - nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); +static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + void *rule_locs) +{ + struct niu *np = netdev_priv(dev); + int ret = 0; - if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { - netif_stop_queue(dev); - if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) - netif_wake_queue(dev); + switch (cmd->cmd) { + case ETHTOOL_GRXFH: + ret = niu_get_hash_opts(np, cmd); + break; + case ETHTOOL_GRXRINGS: + cmd->data = np->num_rx_rings; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = tcam_get_valid_entry_cnt(np); + break; + case ETHTOOL_GRXCLSRULE: + ret = niu_get_ethtool_tcam_entry(np, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs); + break; + default: + ret = -EINVAL; + break; } - dev->trans_start = jiffies; - -out: - return NETDEV_TX_OK; - -out_drop: - rp->tx_errors++; - kfree_skb(skb); - goto out; + return ret; } -static int niu_change_mtu(struct net_device *dev, int new_mtu) +static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) { - struct niu *np = netdev_priv(dev); - int err, orig_jumbo, new_jumbo; + u64 class; + u64 flow_key = 0; + unsigned long flags; - if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) + if (!niu_ethflow_to_class(nfc->flow_type, &class)) return -EINVAL; - orig_jumbo = (dev->mtu > ETH_DATA_LEN); - new_jumbo = (new_mtu > ETH_DATA_LEN); - - dev->mtu = new_mtu; + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_SCTP_IPV6) + return -EINVAL; - if (!netif_running(dev) || - (orig_jumbo == new_jumbo)) + if (nfc->data & RXH_DISCARD) { + niu_lock_parent(np, flags); + flow_key = np->parent->tcam_key[class - + CLASS_CODE_USER_PROG1]; + flow_key |= TCAM_KEY_DISC; + nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); + np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; + niu_unlock_parent(np, flags); return 0; + } else { + /* Discard was set before, but is not set now */ + if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & + TCAM_KEY_DISC) { + niu_lock_parent(np, flags); + flow_key = np->parent->tcam_key[class - + CLASS_CODE_USER_PROG1]; + flow_key &= ~TCAM_KEY_DISC; + nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), + flow_key); + np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = + flow_key; + niu_unlock_parent(np, flags); + } + } - niu_full_shutdown(np, dev); - - niu_free_channels(np); + if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) + return -EINVAL; - niu_enable_napi(np); + niu_lock_parent(np, flags); + nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); + np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; + niu_unlock_parent(np, flags); - err = niu_alloc_channels(np); - if (err) - return err; + return 0; +} - spin_lock_irq(&np->lock); +static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, + struct niu_tcam_entry *tp, + int l2_rdc_tab, u64 class) +{ + u8 pid = 0; + u32 sip, dip, sipm, dipm, spi, spim; + u16 sport, dport, spm, dpm; + + sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); + sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); + dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); + dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); + + tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; + tp->key_mask[0] = 
TCAM_V4KEY0_CLASS_CODE; + tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; + tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; + + tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; + tp->key[3] |= dip; + + tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; + tp->key_mask[3] |= dipm; + + tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << + TCAM_V4KEY2_TOS_SHIFT); + tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << + TCAM_V4KEY2_TOS_SHIFT); + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); + spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); + dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); + dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); + + tp->key[2] |= (((u64)sport << 16) | dport); + tp->key_mask[2] |= (((u64)spm << 16) | dpm); + niu_ethflow_to_l3proto(fsp->flow_type, &pid); + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); + spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); + + tp->key[2] |= spi; + tp->key_mask[2] |= spim; + niu_ethflow_to_l3proto(fsp->flow_type, &pid); + break; + case IP_USER_FLOW: + spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); + spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); - err = niu_init_hw(np); - if (!err) { - init_timer(&np->timer); - np->timer.expires = jiffies + HZ; - np->timer.data = (unsigned long) np; - np->timer.function = niu_timer; + tp->key[2] |= spi; + tp->key_mask[2] |= spim; + pid = fsp->h_u.usr_ip4_spec.proto; + break; + default: + break; + } - err = niu_enable_interrupts(np, 1); - if (err) - niu_stop_hw(np); + tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); + if (pid) { + tp->key_mask[2] |= TCAM_V4KEY2_PROTO; } +} - spin_unlock_irq(&np->lock); +static int niu_add_ethtool_tcam_entry(struct niu *np, + struct ethtool_rxnfc *nfc) +{ + struct niu_parent *parent = np->parent; + struct niu_tcam_entry *tp; + struct ethtool_rx_flow_spec *fsp = &nfc->fs; + struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; + int l2_rdc_table = rdc_table->first_table_num; + u16 idx; + u64 class; + unsigned long flags; + int err, ret; - if (!err) { - netif_start_queue(dev); - if (np->link_config.loopback_mode != LOOPBACK_DISABLED) - netif_carrier_on(dev); + ret = 0; - add_timer(&np->timer); + idx = nfc->fs.location; + if (idx >= tcam_get_size(np)) + return -EINVAL; + + if (fsp->flow_type == IP_USER_FLOW) { + int i; + int add_usr_cls = 0; + int ipv6 = 0; + struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; + + niu_lock_parent(np, flags); + + for (i = 0; i < NIU_L3_PROG_CLS; i++) { + if (parent->l3_cls[i]) { + if (uspec->proto == parent->l3_cls_pid[i]) { + class = parent->l3_cls[i]; + parent->l3_cls_refcnt[i]++; + add_usr_cls = 1; + break; + } + } else { + /* Program new user IP class */ + switch (i) { + case 0: + class = CLASS_CODE_USER_PROG1; + break; + case 1: + class = CLASS_CODE_USER_PROG2; + break; + case 2: + class = CLASS_CODE_USER_PROG3; + break; + case 3: + class = CLASS_CODE_USER_PROG4; + break; + default: + break; + } + if (uspec->ip_ver == ETH_RX_NFC_IP6) + ipv6 = 1; + ret = tcam_user_ip_class_set(np, class, ipv6, + uspec->proto, + uspec->tos, + umask->tos); + if (ret) + goto out; + + ret = tcam_user_ip_class_enable(np, class, 1); + if (ret) + goto out; + parent->l3_cls[i] = class; + parent->l3_cls_pid[i] = uspec->proto; + parent->l3_cls_refcnt[i]++; + add_usr_cls = 1; + break; + } + } + if (!add_usr_cls) { + 
netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", + parent->index, __func__, uspec->proto); + ret = -EINVAL; + goto out; + } + niu_unlock_parent(np, flags); + } else { + if (!niu_ethflow_to_class(fsp->flow_type, &class)) { + return -EINVAL; + } } - return err; -} + niu_lock_parent(np, flags); -static void niu_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct niu *np = netdev_priv(dev); - struct niu_vpd *vpd = &np->vpd; + idx = tcam_get_index(np, idx); + tp = &parent->tcam[idx]; - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - sprintf(info->fw_version, "%d.%d", - vpd->fcode_major, vpd->fcode_minor); - if (np->parent->plat_type != PLAT_TYPE_NIU) - strcpy(info->bus_info, pci_name(np->pdev)); -} + memset(tp, 0, sizeof(*tp)); -static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct niu *np = netdev_priv(dev); - struct niu_link_config *lp; + /* fill in the tcam key and mask */ + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + /* Not yet implemented */ + netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", + parent->index, __func__, fsp->flow_type); + ret = -EINVAL; + goto out; + case IP_USER_FLOW: + if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) { + niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, + class); + } else { + /* Not yet implemented */ + netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n", + parent->index, __func__); + ret = -EINVAL; + goto out; + } + break; + default: + netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", + parent->index, __func__, fsp->flow_type); + ret = -EINVAL; + goto out; + } - lp = &np->link_config; + /* fill in the assoc data */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + tp->assoc_data = TCAM_ASSOCDATA_DISC; + } else { + if (fsp->ring_cookie >= np->num_rx_rings) { + netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", + parent->index, __func__, + (long long)fsp->ring_cookie); + ret = -EINVAL; + goto out; + } + tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | + (fsp->ring_cookie << + TCAM_ASSOCDATA_OFFSET_SHIFT)); + } - memset(cmd, 0, sizeof(*cmd)); - cmd->phy_address = np->phy_addr; - cmd->supported = lp->supported; - cmd->advertising = lp->advertising; - cmd->autoneg = lp->autoneg; - cmd->speed = lp->active_speed; - cmd->duplex = lp->active_duplex; + err = tcam_write(np, idx, tp->key, tp->key_mask); + if (err) { + ret = -EINVAL; + goto out; + } + err = tcam_assoc_write(np, idx, tp->assoc_data); + if (err) { + ret = -EINVAL; + goto out; + } - return 0; -} + /* validate the entry */ + tp->valid = 1; + np->clas.tcam_valid_entries++; +out: + niu_unlock_parent(np, flags); -static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - return -EINVAL; + return ret; } -static u32 niu_get_msglevel(struct net_device *dev) +static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) { - struct niu *np = netdev_priv(dev); - return np->msg_enable; -} + struct niu_parent *parent = np->parent; + struct niu_tcam_entry *tp; + u16 idx; + unsigned long flags; + u64 class; + int ret = 0; -static void niu_set_msglevel(struct net_device *dev, u32 value) -{ - struct niu *np = netdev_priv(dev); - np->msg_enable = 
value; -} + if (loc >= tcam_get_size(np)) + return -EINVAL; -static int niu_get_eeprom_len(struct net_device *dev) -{ - struct niu *np = netdev_priv(dev); + niu_lock_parent(np, flags); - return np->eeprom_len; -} + idx = tcam_get_index(np, loc); + tp = &parent->tcam[idx]; -static int niu_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - struct niu *np = netdev_priv(dev); - u32 offset, len, val; + /* if the entry is of a user defined class, then update*/ + class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> + TCAM_V4KEY0_CLASS_CODE_SHIFT; - offset = eeprom->offset; - len = eeprom->len; + if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { + int i; + for (i = 0; i < NIU_L3_PROG_CLS; i++) { + if (parent->l3_cls[i] == class) { + parent->l3_cls_refcnt[i]--; + if (!parent->l3_cls_refcnt[i]) { + /* disable class */ + ret = tcam_user_ip_class_enable(np, + class, + 0); + if (ret) + goto out; + parent->l3_cls[i] = 0; + parent->l3_cls_pid[i] = 0; + } + break; + } + } + if (i == NIU_L3_PROG_CLS) { + netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", + parent->index, __func__, + (unsigned long long)class); + ret = -EINVAL; + goto out; + } + } - if (offset + len < offset) - return -EINVAL; - if (offset >= np->eeprom_len) - return -EINVAL; - if (offset + len > np->eeprom_len) - len = eeprom->len = np->eeprom_len - offset; + ret = tcam_flush(np, idx); + if (ret) + goto out; - if (offset & 3) { - u32 b_offset, b_count; + /* invalidate the entry */ + tp->valid = 0; + np->clas.tcam_valid_entries--; +out: + niu_unlock_parent(np, flags); - b_offset = offset & 3; - b_count = 4 - b_offset; - if (b_count > len) - b_count = len; + return ret; +} - val = nr64(ESPC_NCR((offset - b_offset) / 4)); - memcpy(data, ((char *)&val) + b_offset, b_count); - data += b_count; - len -= b_count; - offset += b_count; - } - while (len >= 4) { - val = nr64(ESPC_NCR(offset / 4)); - memcpy(data, &val, 4); - data += 4; - len -= 4; - offset += 4; - } - if (len) { - val = nr64(ESPC_NCR(offset / 4)); - memcpy(data, &val, len); +static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct niu *np = netdev_priv(dev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = niu_set_hash_opts(np, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = niu_add_ethtool_tcam_entry(np, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); + break; + default: + ret = -EINVAL; + break; } - return 0; + + return ret; } static const struct { @@ -6492,10 +7795,13 @@ static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int niu_get_stats_count(struct net_device *dev) +static int niu_get_sset_count(struct net_device *dev, int stringset) { struct niu *np = netdev_priv(dev); + if (stringset != ETH_SS_STATS) + return -EINVAL; + return ((np->flags & NIU_FLAGS_XMAC ? 
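/*
 * A minimal sketch of the ethtool convention adopted in the hunk above: the
 * old get_stats_count() callback implicitly meant ETH_SS_STATS, while the
 * replacement get_sset_count() receives the stringset id and must reject
 * sets the driver does not implement.  "foo_get_sset_count" and the count
 * of 42 are placeholders, not niu's real callback or stat count.
 */
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return 42;	/* must match what the get_strings() callback emits */
}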
NUM_XMAC_STAT_KEYS : NUM_BMAC_STAT_KEYS) + @@ -6522,6 +7828,8 @@ static void niu_get_ethtool_stats(struct net_device *dev, for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; + niu_sync_rx_discard_stats(np, rp, 0); + data[0] = rp->rx_channel; data[1] = rp->rx_packets; data[2] = rp->rx_bytes; @@ -6607,14 +7915,17 @@ static const struct ethtool_ops niu_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = niu_get_msglevel, .set_msglevel = niu_set_msglevel, + .nway_reset = niu_nway_reset, .get_eeprom_len = niu_get_eeprom_len, .get_eeprom = niu_get_eeprom, .get_settings = niu_get_settings, .set_settings = niu_set_settings, .get_strings = niu_get_strings, - .get_stats_count = niu_get_stats_count, + .get_sset_count = niu_get_sset_count, .get_ethtool_stats = niu_get_ethtool_stats, .phys_id = niu_phys_id, + .get_rxnfc = niu_get_nfc, + .set_rxnfc = niu_set_nfc, }; static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, @@ -6634,9 +7945,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, * won't get any interrupts and that's painful to debug. */ if (nr64(LDG_NUM(ldn)) != ldg) { - dev_err(np->device, PFX "Port %u, mis-matched " - "LDG assignment " - "for ldn %d, should be %d is %llu\n", + dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n", np->port, ldn, ldg, (unsigned long long) nr64(LDG_NUM(ldn))); return -EINVAL; @@ -6689,7 +7998,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) break; } while (limit--); if (!(frame & ESPC_PIO_STAT_READ_END)) { - dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", + dev_err(np->device, "EEPROM read timeout frame[%llx]\n", (unsigned long long) frame); return -ENODEV; } @@ -6704,7 +8013,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) break; } while (limit--); if (!(frame & ESPC_PIO_STAT_READ_END)) { - dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", + dev_err(np->device, "EEPROM read timeout frame[%llx]\n", (unsigned long long) frame); return -ENODEV; } @@ -6776,7 +8085,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np) int i; for (i = 0; i < len - 5; i++) { - if (!strncmp(s + i, "FCode ", 5)) + if (!strncmp(s + i, "FCode ", 6)) break; } if (i >= len - 5) @@ -6785,8 +8094,9 @@ static void __devinit niu_vpd_parse_version(struct niu *np) s += i + 5; sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); - niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", - vpd->fcode_major, vpd->fcode_minor); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "VPD_SCAN: FCODE major(%d) minor(%d)\n", + vpd->fcode_major, vpd->fcode_minor); if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || (vpd->fcode_major == NIU_VPD_MIN_MAJOR && vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) @@ -6806,8 +8116,8 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np, #define FOUND_MASK_PHY 0x00000020 #define FOUND_MASK_ALL 0x0000003f - niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", - start, end); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "VPD_SCAN: start[%x] end[%x]\n", start, end); while (start < end) { int len, err, instance, type, prop_len; char namebuf[64]; @@ -6861,8 +8171,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np, } if (max_len && prop_len > max_len) { - dev_err(np->device, PFX "Property '%s' length (%d) is " - "too long.\n", namebuf, prop_len); + dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); return -EINVAL; } @@ -6870,8 
+8179,9 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np, u32 off = start + 5 + err; int i; - niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " - "len[%d]\n", namebuf, prop_len); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "VPD_SCAN: Reading in property [%s] len[%d]\n", + namebuf, prop_len); for (i = 0; i < prop_len; i++) *prop_buf++ = niu_pci_eeprom_read(np, off + i); } @@ -6996,6 +8306,12 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np, np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_XPCS; + } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { + /* 10G Serdes or 1G Serdes, default to 10G */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->flags |= NIU_FLAGS_XCVR_SERDES; + np->mac_xcvr = MAC_XCVR_XPCS; } else { return -EINVAL; } @@ -7029,8 +8345,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np) u8 val8; if (!is_valid_ether_addr(&vpd->local_mac[0])) { - dev_err(np->device, PFX "VPD MAC invalid, " - "falling back to SPROM.\n"); + dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); np->flags &= ~NIU_FLAGS_VPD_VALID; return; @@ -7047,14 +8362,14 @@ static void __devinit niu_pci_vpd_validate(struct niu *np) np->flags &= ~NIU_FLAGS_10G; } if (np->flags & NIU_FLAGS_10G) - np->mac_xcvr = MAC_XCVR_XPCS; + np->mac_xcvr = MAC_XCVR_XPCS; } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { - dev_err(np->device, PFX "Illegal phy string [%s].\n", + dev_err(np->device, "Illegal phy string [%s]\n", np->vpd.phy_type); - dev_err(np->device, PFX "Falling back to SPROM.\n"); + dev_err(np->device, "Falling back to SPROM\n"); np->flags &= ~NIU_FLAGS_VPD_VALID; return; } @@ -7082,7 +8397,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) np->eeprom_len = len; - niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: Image size %llu\n", (unsigned long long)val); sum = 0; for (i = 0; i < len; i++) { @@ -7092,10 +8408,10 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) sum += (val >> 16) & 0xff; sum += (val >> 24) & 0xff; } - niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: Checksum %x\n", (int)(sum & 0xff)); if ((sum & 0xff) != 0xab) { - dev_err(np->device, PFX "Bad SPROM checksum " - "(%x, should be 0xab)\n", (int) (sum & 0xff)); + dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); return -EINVAL; } @@ -7118,11 +8434,12 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) ESPC_PHY_TYPE_PORT3_SHIFT; break; default: - dev_err(np->device, PFX "Bogus port number %u\n", + dev_err(np->device, "Bogus port number %u\n", np->port); return -EINVAL; } - niudbg(PROBE, "SPROM: PHY type %x\n", val8); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: PHY type %x\n", val8); switch (val8) { case ESPC_PHY_TYPE_1G_COPPER: @@ -7154,30 +8471,27 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) break; default: - dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8); + dev_err(np->device, "Bogus SPROM phy type %u\n", val8); return -EINVAL; } val = nr64(ESPC_MAC_ADDR0); - niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", - (unsigned long long) val); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); 
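/*
 * A minimal sketch of the logging helpers the hunks in this patch convert
 * to, replacing the driver-private niudbg()/PFX printk wrappers.
 * "foo_priv" and "foo_show_sprom_word" are hypothetical names used only for
 * illustration; netif_printk() gates output on the priv->msg_enable bit for
 * the given message type, much as the old NETIF_MSG_##TYPE test did, and
 * netdev_err() prefixes the interface name automatically.
 */
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *dev;
	u32 msg_enable;
};

static void foo_show_sprom_word(struct foo_priv *np, u64 val)
{
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM word [%08llx]\n", (unsigned long long)val);

	if (!val)
		netdev_err(np->dev, "SPROM word unexpectedly zero\n");
}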
dev->perm_addr[0] = (val >> 0) & 0xff; dev->perm_addr[1] = (val >> 8) & 0xff; dev->perm_addr[2] = (val >> 16) & 0xff; dev->perm_addr[3] = (val >> 24) & 0xff; val = nr64(ESPC_MAC_ADDR1); - niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", - (unsigned long long) val); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); dev->perm_addr[4] = (val >> 0) & 0xff; dev->perm_addr[5] = (val >> 8) & 0xff; if (!is_valid_ether_addr(&dev->perm_addr[0])) { - dev_err(np->device, PFX "SPROM MAC address invalid\n"); - dev_err(np->device, PFX "[ \n"); - for (i = 0; i < 6; i++) - printk("%02x ", dev->perm_addr[i]); - printk("]\n"); + dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", + dev->perm_addr); return -EINVAL; } @@ -7189,8 +8503,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); val = nr64(ESPC_MOD_STR_LEN); - niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", - (unsigned long long) val); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); if (val >= 8 * 4) return -EINVAL; @@ -7205,8 +8519,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) np->vpd.model[val] = '\0'; val = nr64(ESPC_BD_MOD_STR_LEN); - niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", - (unsigned long long) val); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); if (val >= 4 * 4) return -EINVAL; @@ -7222,8 +8536,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) np->vpd.mac_num = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; - niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", - np->vpd.mac_num); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); return 0; } @@ -7256,8 +8570,6 @@ static int __devinit niu_get_and_validate_port(struct niu *np) } } - niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n", - np->port, parent->num_ports); if (np->port >= parent->num_ports) return -ENODEV; @@ -7286,14 +8598,12 @@ static int __devinit phy_record(struct niu_parent *parent, pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", parent->index, id, - (type == PHY_TYPE_PMA_PMD ? - "PMA/PMD" : - (type == PHY_TYPE_PCS ? - "PCS" : "MII")), + type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : + type == PHY_TYPE_PCS ? 
"PCS" : "MII", phy_port); if (p->cur[type] >= NIU_MAX_PORTS) { - printk(KERN_ERR PFX "Too many PHY ports.\n"); + pr_err("Too many PHY ports\n"); return -EINVAL; } idx = p->cur[type]; @@ -7354,8 +8664,7 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent) parent->rxchan_per_port[i] = (16 / num_ports); parent->txchan_per_port[i] = (16 / num_ports); - pr_info(PFX "niu%d: Port %u [%u RX chans] " - "[%u TX chans]\n", + pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", parent->index, i, parent->rxchan_per_port[i], parent->txchan_per_port[i]); @@ -7398,8 +8707,7 @@ static void __devinit niu_divide_channels(struct niu_parent *parent, parent->rxchan_per_port[i] = rx_chans_per_1g; parent->txchan_per_port[i] = tx_chans_per_1g; } - pr_info(PFX "niu%d: Port %u [%u RX chans] " - "[%u TX chans]\n", + pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", parent->index, i, parent->rxchan_per_port[i], parent->txchan_per_port[i]); @@ -7408,23 +8716,20 @@ static void __devinit niu_divide_channels(struct niu_parent *parent, } if (tot_rx > NIU_NUM_RXCHAN) { - printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " - "resetting to one per port.\n", + pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", parent->index, tot_rx); for (i = 0; i < num_ports; i++) parent->rxchan_per_port[i] = 1; } if (tot_tx > NIU_NUM_TXCHAN) { - printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " - "resetting to one per port.\n", + pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", parent->index, tot_tx); for (i = 0; i < num_ports; i++) parent->txchan_per_port[i] = 1; } if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { - printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " - "RX[%d] TX[%d]\n", - parent->index, tot_rx, tot_tx); + pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", + parent->index, tot_rx, tot_tx); } } @@ -7452,18 +8757,18 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, struct rdc_table *rt = &tp->tables[grp]; int slot; - pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", + pr_info("niu%d: Port %d RDC tbl(%d) [ ", parent->index, i, tp->first_table_num + grp); for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { rt->rxdma_channel[slot] = rdc_channel_base + this_channel_offset; - printk("%d ", rt->rxdma_channel[slot]); + pr_cont("%d ", rt->rxdma_channel[slot]); if (++this_channel_offset == num_channels) this_channel_offset = 0; } - printk("]\n"); + pr_cont("]\n"); } parent->rdc_default[i] = rdc_channel_base; @@ -7524,6 +8829,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) u32 val; int err; + num_10g = num_1g = 0; + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { num_10g = 0; @@ -7540,6 +8847,16 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) parent->num_ports = 2; val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); + } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && + (parent->plat_type == PLAT_TYPE_NIU)) { + /* this is the Monza case */ + if (np->flags & NIU_FLAGS_10G) { + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1)); + } else { + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_1G, 1)); + } } else { err = fill_phy_probe_info(np, parent, info); if (err) @@ -7611,8 +8928,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) break; default: - printk(KERN_ERR PFX "Unsupported port config " - "10G[%d] 1G[%d]\n", + 
pr_err("Unsupported port config 10G[%d] 1G[%d]\n", num_10g, num_1g); return -EINVAL; } @@ -7630,8 +8946,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) return 0; unknown_vg_1g_port: - printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", - lowest_1g); + pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); return -EINVAL; } @@ -7640,9 +8955,6 @@ static int __devinit niu_probe_ports(struct niu *np) struct niu_parent *parent = np->parent; int err, i; - niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n", - parent->port_phy); - if (parent->port_phy == PORT_PHY_UNKNOWN) { err = walk_phys(np, parent); if (err) @@ -7663,10 +8975,8 @@ static int __devinit niu_classifier_swstate_init(struct niu *np) { struct niu_classifier *cp = &np->clas; - niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n", - np->parent->tcam_num_entries); - - cp->tcam_index = (u16) np->port; + cp->tcam_top = (u16) np->port; + cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; cp->h1_init = 0xffffffff; cp->h2_init = 0xffff; @@ -7686,7 +8996,9 @@ static void __devinit niu_link_config_init(struct niu *np) ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg); lp->speed = lp->active_speed = SPEED_INVALID; - lp->duplex = lp->active_duplex = DUPLEX_INVALID; + lp->duplex = DUPLEX_FULL; + lp->active_duplex = DUPLEX_INVALID; + lp->autoneg = 1; #if 0 lp->loopback_mode = LOOPBACK_MAC; lp->active_speed = SPEED_10000; @@ -7728,8 +9040,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) break; default: - dev_err(np->device, PFX "Port %u is invalid, cannot " - "compute MAC block offset.\n", np->port); + dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); return -EINVAL; } @@ -7939,9 +9250,8 @@ static int __devinit niu_get_of_props(struct niu *np) phy_type = of_get_property(dp, "phy-type", &prop_len); if (!phy_type) { - dev_err(np->device, PFX "%s: OF node lacks " - "phy-type property\n", - dp->full_name); + netdev_err(dev, "%s: OF node lacks phy-type property\n", + dp->full_name); return -EINVAL; } @@ -7951,34 +9261,26 @@ static int __devinit niu_get_of_props(struct niu *np) strcpy(np->vpd.phy_type, phy_type); if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { - dev_err(np->device, PFX "%s: Illegal phy string [%s].\n", - dp->full_name, np->vpd.phy_type); + netdev_err(dev, "%s: Illegal phy string [%s]\n", + dp->full_name, np->vpd.phy_type); return -EINVAL; } mac_addr = of_get_property(dp, "local-mac-address", &prop_len); if (!mac_addr) { - dev_err(np->device, PFX "%s: OF node lacks " - "local-mac-address property\n", - dp->full_name); + netdev_err(dev, "%s: OF node lacks local-mac-address property\n", + dp->full_name); return -EINVAL; } if (prop_len != dev->addr_len) { - dev_err(np->device, PFX "%s: OF MAC address prop len (%d) " - "is wrong.\n", - dp->full_name, prop_len); + netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", + dp->full_name, prop_len); } memcpy(dev->perm_addr, mac_addr, dev->addr_len); if (!is_valid_ether_addr(&dev->perm_addr[0])) { - int i; - - dev_err(np->device, PFX "%s: OF MAC address is invalid\n", - dp->full_name); - dev_err(np->device, PFX "%s: [ \n", - dp->full_name); - for (i = 0; i < 6; i++) - printk("%02x ", dev->perm_addr[i]); - printk("]\n"); + netdev_err(dev, "%s: OF MAC address is invalid\n", + dp->full_name); + netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr); return -EINVAL; } @@ -7989,6 +9291,11 @@ static int __devinit niu_get_of_props(struct niu *np) if 
(model) strcpy(np->vpd.model, model); + if (of_find_property(dp, "hot-swappable-phy", &prop_len)) { + np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | + NIU_FLAGS_HOTPLUG_PHY); + } + return 0; #else return -EINVAL; @@ -8021,8 +9328,8 @@ static int __devinit niu_get_invariants(struct niu *np) nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); offset = niu_pci_vpd_offset(np); - niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n", - offset); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "%s() VPD offset [%08x]\n", __func__, offset); if (offset) niu_pci_vpd_fetch(np, offset); nw64(ESPC_PIO_EN, 0); @@ -8182,11 +9489,9 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np, struct niu_parent *p; int i; - niudbg(PROBE, "niu_new_parent: Creating new parent.\n"); - plat_dev = platform_device_register_simple("niu", niu_parent_index, NULL, 0); - if (!plat_dev) + if (IS_ERR(plat_dev)) return NULL; for (i = 0; attr_name(niu_parent_attributes[i]); i++) { @@ -8248,9 +9553,6 @@ static struct niu_parent * __devinit niu_get_parent(struct niu *np, struct niu_parent *p, *tmp; int port = np->port; - niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n", - ptype, port); - mutex_lock(&niu_parent_lock); p = NULL; list_for_each_entry(tmp, &niu_parent_list, list) { @@ -8288,7 +9590,8 @@ static void niu_put_parent(struct niu *np) BUG_ON(!p || p->ports[port] != np); - niudbg(PROBE, "niu_put_parent: port[%u]\n", port); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "%s() port[%u]\n", __func__, port); sprintf(port_name, "port%d", port); @@ -8335,7 +9638,7 @@ static u64 niu_pci_map_page(struct device *dev, struct page *page, static void niu_pci_unmap_page(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { - return dma_unmap_page(dev, dma_address, size, direction); + dma_unmap_page(dev, dma_address, size, direction); } static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, @@ -8374,11 +9677,12 @@ static struct net_device * __devinit niu_alloc_and_init( struct of_device *op, const struct niu_ops *ops, u8 port) { - struct net_device *dev = alloc_etherdev(sizeof(struct niu)); + struct net_device *dev; struct niu *np; + dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); if (!dev) { - dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n"); + dev_err(gen_dev, "Etherdev alloc failed, aborting\n"); return NULL; } @@ -8401,28 +9705,31 @@ static struct net_device * __devinit niu_alloc_and_init( return dev; } +static const struct net_device_ops niu_netdev_ops = { + .ndo_open = niu_open, + .ndo_stop = niu_close, + .ndo_start_xmit = niu_start_xmit, + .ndo_get_stats = niu_get_stats, + .ndo_set_multicast_list = niu_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = niu_set_mac_addr, + .ndo_do_ioctl = niu_ioctl, + .ndo_tx_timeout = niu_tx_timeout, + .ndo_change_mtu = niu_change_mtu, +}; + static void __devinit niu_assign_netdev_ops(struct net_device *dev) { - dev->open = niu_open; - dev->stop = niu_close; - dev->get_stats = niu_get_stats; - dev->set_multicast_list = niu_set_rx_mode; - dev->set_mac_address = niu_set_mac_addr; - dev->do_ioctl = niu_ioctl; - dev->tx_timeout = niu_tx_timeout; - dev->hard_start_xmit = niu_start_xmit; + dev->netdev_ops = &niu_netdev_ops; dev->ethtool_ops = &niu_ethtool_ops; dev->watchdog_timeo = NIU_TX_TIMEOUT; - dev->change_mtu = niu_change_mtu; } static void __devinit niu_device_announce(struct niu *np) { struct net_device *dev = np->dev; - DECLARE_MAC_BUF(mac); - pr_info("%s: NIU Ethernet %s\n", - 
dev->name, print_mac(mac, dev->dev_addr)); + pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", @@ -8438,7 +9745,9 @@ static void __devinit niu_device_announce(struct niu *np) dev->name, (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), - (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), + (np->flags & NIU_FLAGS_FIBER ? "FIBER" : + (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : + "COPPER")), (np->mac_xcvr == MAC_XCVR_MII ? "MII" : (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), np->vpd.phy_type); @@ -8448,7 +9757,6 @@ static void __devinit niu_device_announce(struct niu *np) static int __devinit niu_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - unsigned long niureg_base, niureg_len; union niu_parent_id parent_id; struct net_device *dev; struct niu *np; @@ -8460,30 +9768,26 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, PFX "Cannot enable PCI device, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, PFX "Cannot find proper PCI device " - "base addresses, aborting.\n"); + dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); err = -ENODEV; goto err_out_disable_pdev; } err = pci_request_regions(pdev, DRV_MODULE_NAME); if (err) { - dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable_pdev; } pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (pos <= 0) { - dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); goto err_out_free_res; } @@ -8516,36 +9820,29 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, PCI_EXP_DEVCTL_RELAX_EN); pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); - dma_mask = DMA_44BIT_MASK; + dma_mask = DMA_BIT_MASK(44); err = pci_set_dma_mask(pdev, dma_mask); if (!err) { dev->features |= NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, dma_mask); if (err) { - dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " - "DMA for consistent allocations, " - "aborting.\n"); + dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); goto err_out_release_parent; } } - if (err || dma_mask == DMA_32BIT_MASK) { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err || dma_mask == DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, PFX "No usable DMA configuration, " - "aborting.\n"); + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_release_parent; } } dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); - niureg_base = pci_resource_start(pdev, 0); - niureg_len = pci_resource_len(pdev, 0); - - np->regs = ioremap_nocache(niureg_base, niureg_len); + np->regs = pci_ioremap_bar(pdev, 0); if (!np->regs) { - dev_err(&pdev->dev, PFX "Cannot map device registers, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_release_parent; } @@ -8560,15 +9857,13 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, err = 
niu_get_invariants(np); if (err) { if (err != -ENODEV) - dev_err(&pdev->dev, PFX "Problem fetching invariants " - "of chip, aborting.\n"); + dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } err = register_netdev(dev); if (err) { - dev_err(&pdev->dev, PFX "Cannot register net device, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_iounmap; } @@ -8749,11 +10044,6 @@ static const struct niu_ops niu_phys_ops = { .unmap_single = niu_phys_unmap_single, }; -static unsigned long res_size(struct resource *r) -{ - return r->end - r->start + 1UL; -} - static int __devinit niu_of_probe(struct of_device *op, const struct of_device_id *match) { @@ -8767,7 +10057,7 @@ static int __devinit niu_of_probe(struct of_device *op, reg = of_get_property(op->node, "reg", NULL); if (!reg) { - dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", + dev_err(&op->dev, "%s: No 'reg' property, aborting\n", op->node->full_name); return -ENODEV; } @@ -8793,31 +10083,28 @@ static int __devinit niu_of_probe(struct of_device *op, dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); np->regs = of_ioremap(&op->resource[1], 0, - res_size(&op->resource[1]), + resource_size(&op->resource[1]), "niu regs"); if (!np->regs) { - dev_err(&op->dev, PFX "Cannot map device registers, " - "aborting.\n"); + dev_err(&op->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_release_parent; } np->vir_regs_1 = of_ioremap(&op->resource[2], 0, - res_size(&op->resource[2]), + resource_size(&op->resource[2]), "niu vregs-1"); if (!np->vir_regs_1) { - dev_err(&op->dev, PFX "Cannot map device vir registers 1, " - "aborting.\n"); + dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } np->vir_regs_2 = of_ioremap(&op->resource[3], 0, - res_size(&op->resource[3]), + resource_size(&op->resource[3]), "niu vregs-2"); if (!np->vir_regs_2) { - dev_err(&op->dev, PFX "Cannot map device vir registers 2, " - "aborting.\n"); + dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } @@ -8827,15 +10114,13 @@ static int __devinit niu_of_probe(struct of_device *op, err = niu_get_invariants(np); if (err) { if (err != -ENODEV) - dev_err(&op->dev, PFX "Problem fetching invariants " - "of chip, aborting.\n"); + dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } err = register_netdev(dev); if (err) { - dev_err(&op->dev, PFX "Cannot register net device, " - "aborting.\n"); + dev_err(&op->dev, "Cannot register net device, aborting\n"); goto err_out_iounmap; } @@ -8848,19 +10133,19 @@ static int __devinit niu_of_probe(struct of_device *op, err_out_iounmap: if (np->vir_regs_1) { of_iounmap(&op->resource[2], np->vir_regs_1, - res_size(&op->resource[2])); + resource_size(&op->resource[2])); np->vir_regs_1 = NULL; } if (np->vir_regs_2) { of_iounmap(&op->resource[3], np->vir_regs_2, - res_size(&op->resource[3])); + resource_size(&op->resource[3])); np->vir_regs_2 = NULL; } if (np->regs) { of_iounmap(&op->resource[1], np->regs, - res_size(&op->resource[1])); + resource_size(&op->resource[1])); np->regs = NULL; } @@ -8885,19 +10170,19 @@ static int __devexit niu_of_remove(struct of_device *op) if (np->vir_regs_1) { of_iounmap(&op->resource[2], np->vir_regs_1, - res_size(&op->resource[2])); + resource_size(&op->resource[2])); np->vir_regs_1 = NULL; } if (np->vir_regs_2) { of_iounmap(&op->resource[3], np->vir_regs_2, - 
res_size(&op->resource[3])); + resource_size(&op->resource[3])); np->vir_regs_2 = NULL; } if (np->regs) { of_iounmap(&op->resource[1], np->regs, - res_size(&op->resource[1])); + resource_size(&op->resource[1])); np->regs = NULL; } @@ -8911,7 +10196,7 @@ static int __devexit niu_of_remove(struct of_device *op) return 0; } -static struct of_device_id niu_match[] = { +static const struct of_device_id niu_match[] = { { .name = "network", .compatible = "SUNW,niusl",
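/*
 * A minimal sketch, under assumed names rather than this driver's real
 * helpers, of the multiqueue TX pattern the patch moves niu to: the device
 * is allocated with alloc_etherdev_mq(), the stack's queue choice is read
 * back with skb_get_queue_mapping(), and flow control acts on the matching
 * netdev_queue via netif_tx_stop_queue()/netif_tx_wake_queue() instead of
 * the whole-device netif_stop_queue()/netif_wake_queue().
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stand-in for a per-ring free-descriptor count; hypothetical, sketch only */
static unsigned int foo_tx_avail(struct net_device *dev, u16 q)
{
	return 64;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 q = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

	if (foo_tx_avail(dev, q) <= skb_shinfo(skb)->nr_frags + 1) {
		netif_tx_stop_queue(txq);	/* stop only this queue */
		return NETDEV_TX_BUSY;
	}

	/* ... post the skb's descriptors on ring q and kick the hardware ... */

	if (foo_tx_avail(dev, q) <= MAX_SKB_FRAGS + 1) {
		netif_tx_stop_queue(txq);
		if (foo_tx_avail(dev, q) > MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;
}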