X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Ftg3.c;h=3a74d21685982f3e82b159d9ae1fa8940c2dce47;hb=ca5f12c1a82cf72ce73617dfc8ef56faf6fec30a;hp=07b3f77e7626dd646dcd0aed83484e12ef3579d8;hpb=920e37f76b19b4a3d8a1a3144cd6ee24e0e7f5b4;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 07b3f77..3a74d21 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2007 Broadcom Corporation. + * Copyright (C) 2005-2009 Broadcom Corporation. * * Firmware is: * Derived from proprietary unpublished source code, @@ -32,12 +32,15 @@ #include #include #include +#include +#include #include #include #include #include #include #include +#include #include #include @@ -52,20 +55,21 @@ #include #endif +#define BAR_0 0 +#define BAR_2 2 + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define TG3_VLAN_TAG_USED 1 #else #define TG3_VLAN_TAG_USED 0 #endif -#define TG3_TSO_SUPPORT 1 - #include "tg3.h" #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.92" -#define DRV_MODULE_RELDATE "May 2, 2008" +#define DRV_MODULE_VERSION "3.105" +#define DRV_MODULE_RELDATE "December 2, 2009" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -88,7 +92,7 @@ /* hardware minimum and maximum for a single frame's data payload */ #define TG3_MIN_MTU 60 #define TG3_MAX_MTU(tp) \ - ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500) + ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500) /* These numbers seem to be hard coded in the NIC firmware somehow. * You can't change the ring sizes, but you can change where you place @@ -98,6 +102,7 @@ #define TG3_DEF_RX_RING_PENDING 200 #define TG3_RX_JUMBO_RING_SIZE 256 #define TG3_DEF_RX_JUMBO_RING_PENDING 100 +#define TG3_RSS_INDIR_TBL_SIZE 128 /* Do not place this n-ring entries value into the tp struct itself, * we really want to expose these constants to GCC so that modulo et @@ -106,32 +111,52 @@ * replace things like '% foo' with '& (foo - 1)'. */ #define TG3_RX_RCB_RING_SIZE(tp) \ - ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024) + (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \ + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 
1024 : 512) #define TG3_TX_RING_SIZE 512 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ TG3_RX_RING_SIZE) -#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ - TG3_RX_JUMBO_RING_SIZE) +#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \ + TG3_RX_JUMBO_RING_SIZE) #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ - TG3_RX_RCB_RING_SIZE(tp)) + TG3_RX_RCB_RING_SIZE(tp)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) -#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) -#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64) +#define TG3_DMA_BYTE_ENAB 64 + +#define TG3_RX_STD_DMA_SZ 1536 +#define TG3_RX_JMB_DMA_SZ 9046 + +#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + +#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) +#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + +#define TG3_RX_STD_BUFF_RING_SIZE \ + (sizeof(struct ring_info) * TG3_RX_RING_SIZE) + +#define TG3_RX_JMB_BUFF_RING_SIZE \ + (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) /* minimum number of free TX descriptors required to wake up TX process */ -#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4) +#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) + +#define TG3_RAW_IP_ALIGN 2 /* number of ETHTOOL_GSTATS u64's */ #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) #define TG3_NUM_TEST 6 +#define FIRMWARE_TG3 "tigon/tg3.bin" +#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" +#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" + static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -139,6 +164,11 @@ MODULE_AUTHOR("David S. 
Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_FIRMWARE(FIRMWARE_TG3); +MODULE_FIRMWARE(FIRMWARE_TG3TSO); +MODULE_FIRMWARE(FIRMWARE_TG3TSO5); + +#define TG3_RSS_MIN_NUM_MSIX_VECS 2 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ module_param(tg3_debug, int, 0); @@ -203,6 +233,17 @@ static struct pci_device_id tg3_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -364,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) TG3_64BIT_REG_LOW, val); return; } - if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { + if (off == TG3_RX_STD_PROD_IDX_REG) { pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + TG3_64BIT_REG_LOW, val); return; @@ -533,6 +574,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) return 0; switch (locknum) { + case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: break; default: @@ -570,6 +612,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) return; switch (locknum) { + case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: break; default: @@ -582,39 +625,47 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) static void tg3_disable_ints(struct tg3 *tp) { + int i; + tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); -} - -static inline void tg3_cond_int(struct tg3 *tp) -{ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && - (tp->hw_status->status & SD_STATUS_UPDATED)) - tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); - else - tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); } static void tg3_enable_ints(struct tg3 *tp) { + int i; + u32 coal_now = 0; + tp->irq_sync = 0; wmb(); tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - (tp->last_tag << 24)); - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - (tp->last_tag << 24)); - tg3_cond_int(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tp->tg3_flags2 & 
TG3_FLG2_1SHOT_MSI) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | coal_now); } -static inline unsigned int tg3_has_work(struct tg3 *tp) +static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) { - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int work_exists = 0; /* check for phy events */ @@ -625,22 +676,23 @@ static inline unsigned int tg3_has_work(struct tg3 *tp) work_exists = 1; } /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer != tp->tx_cons || - sblk->idx[0].rx_producer != tp->rx_rcb_ptr) + if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_exists = 1; return work_exists; } -/* tg3_restart_ints +/* tg3_int_reenable * similar to tg3_enable_ints, but it accurately determines whether there * is new work pending and can return without flushing the PIO write * which reenables interrupts */ -static void tg3_restart_ints(struct tg3 *tp) +static void tg3_int_reenable(struct tg3_napi *tnapi) { - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - tp->last_tag << 24); + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); mmiowb(); /* When doing tagged status, this work check is unnecessary. @@ -648,39 +700,58 @@ static void tg3_restart_ints(struct tg3 *tp) * work we've completed. */ if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && - tg3_has_work(tp)) + tg3_has_work(tnapi)) tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + HOSTCC_MODE_ENABLE | tnapi->coal_now); +} + +static void tg3_napi_disable(struct tg3 *tp) +{ + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +} + +static void tg3_napi_enable(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); } static inline void tg3_netif_stop(struct tg3 *tp) { tp->dev->trans_start = jiffies; /* prevent tx timeout */ - napi_disable(&tp->napi); + tg3_napi_disable(tp); netif_tx_disable(tp->dev); } static inline void tg3_netif_start(struct tg3 *tp) { - netif_wake_queue(tp->dev); - /* NOTE: unconditional netif_wake_queue is only appropriate - * so long as all callers are assured to have free tx slots - * (such as after tg3_init_hw) + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) */ - napi_enable(&tp->napi); - tp->hw_status->status |= SD_STATUS_UPDATED; + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; tg3_enable_ints(tp); } static void tg3_switch_clocks(struct tg3 *tp) { - u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + u32 clock_ctrl; u32 orig_clock_ctrl; if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) return; + clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + orig_clock_ctrl = clock_ctrl; clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE | @@ -720,7 +791,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) *val = 0x0; - frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) & + frame_val = 
((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); @@ -761,7 +832,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) unsigned int loops; int ret; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) return 0; @@ -771,7 +842,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) udelay(80); } - frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) & + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); @@ -804,63 +875,6 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) return ret; } -static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) -{ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); -} - -static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) -{ - u32 phy; - - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) - return; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 ephy; - - if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) { - tg3_writephy(tp, MII_TG3_EPHY_TEST, - ephy | MII_TG3_EPHY_SHADOW_EN); - if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) { - if (enable) - phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX; - else - phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX; - tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy); - } - tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy); - } - } else { - phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | - MII_TG3_AUXCTL_SHDWSEL_MISC; - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { - if (enable) - phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; - else - phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; - phy |= MII_TG3_AUXCTL_MISC_WREN; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); - } - } -} - -static void tg3_phy_set_wirespeed(struct tg3 *tp) -{ - u32 val; - - if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) - return; - - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) - tg3_writephy(tp, MII_TG3_AUX_CTRL, - (val | (1 << 15) | (1 << 4))); -} - static int tg3_bmcr_reset(struct tg3 *tp) { u32 phy_control; @@ -886,165 +900,901 @@ static int tg3_bmcr_reset(struct tg3 *tp) } udelay(10); } - if (limit <= 0) + if (limit < 0) return -EBUSY; return 0; } -static void tg3_phy_apply_otp(struct tg3 *tp) +static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) { - u32 otp, phy; + struct tg3 *tp = bp->priv; + u32 val; - if (!tp->phy_otp) - return; + spin_lock_bh(&tp->lock); - otp = tp->phy_otp; + if (tg3_readphy(tp, reg, &val)) + val = -EIO; - /* Enable SM_DSP clock and tx 6dB coding. 
*/ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + spin_unlock_bh(&tp->lock); - phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); - phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; - tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + return val; +} - phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | - ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); +static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) +{ + struct tg3 *tp = bp->priv; + u32 ret = 0; - phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); - phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; - tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + spin_lock_bh(&tp->lock); - phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + if (tg3_writephy(tp, reg, val)) + ret = -EIO; - phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); + spin_unlock_bh(&tp->lock); - phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | - ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + return ret; +} - /* Turn off SM_DSP clock. */ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); +static int tg3_mdio_reset(struct mii_bus *bp) +{ + return 0; } -static int tg3_wait_macro_done(struct tg3 *tp) +static void tg3_mdio_config_5785(struct tg3 *tp) { - int limit = 100; + u32 val; + struct phy_device *phydev; - while (limit--) { - u32 tmp32; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case TG3_PHY_ID_BCM50610: + case TG3_PHY_ID_BCM50610M: + val = MAC_PHYCFG2_50610_LED_MODES; + break; + case TG3_PHY_ID_BCMAC131: + val = MAC_PHYCFG2_AC131_LED_MODES; + break; + case TG3_PHY_ID_RTL8211C: + val = MAC_PHYCFG2_RTL8211C_LED_MODES; + break; + case TG3_PHY_ID_RTL8201E: + val = MAC_PHYCFG2_RTL8201E_LED_MODES; + break; + default: + return; + } - if (!tg3_readphy(tp, 0x16, &tmp32)) { - if ((tmp32 & 0x1000) == 0) - break; - } + if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; } - if (limit <= 0) - return -EBUSY; - return 0; + if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) + val |= MAC_PHYCFG2_EMODE_MASK_MASK | + MAC_PHYCFG2_FMODE_MASK_MASK | + MAC_PHYCFG2_GMODE_MASK_MASK | + MAC_PHYCFG2_ACT_MASK_MASK | + MAC_PHYCFG2_QUAL_MASK_MASK | + MAC_PHYCFG2_INBAND_ENABLE; + + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { + if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; + if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; + } + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); + + val = tr32(MAC_EXT_RGMII_MODE); + val &= ~(MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + 
MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET | + MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET); + if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { + if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + val |= MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET; + if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + val |= MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET; + } + tw32(MAC_EXT_RGMII_MODE, val); +} + +static void tg3_mdio_start(struct tg3 *tp) +{ + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + u32 funcnum, is_serdes; + + funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC; + if (funcnum) + tp->phy_addr = 2; + else + tp->phy_addr = 1; + + is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; + if (is_serdes) + tp->phy_addr += 7; + } else + tp->phy_addr = TG3_PHY_MII_ADDR; + + if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); } -static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) +static int tg3_mdio_init(struct tg3 *tp) { - static const u32 test_pat[4][6] = { - { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, - { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, - { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, - { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } - }; - int chan; - - for (chan = 0; chan < 4; chan++) { - int i; + int i; + u32 reg; + struct phy_device *phydev; - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0002); + tg3_mdio_start(tp); - for (i = 0; i < 6; i++) - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, - test_pat[chan][i]); + if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || + (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)) + return 0; - tg3_writephy(tp, 0x16, 0x0202); - if (tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } + tp->mdio_bus = mdiobus_alloc(); + if (tp->mdio_bus == NULL) + return -ENOMEM; - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0082); - if (tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } + tp->mdio_bus->name = "tg3 mdio bus"; + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", + (tp->pdev->bus->number << 8) | tp->pdev->devfn); + tp->mdio_bus->priv = tp; + tp->mdio_bus->parent = &tp->pdev->dev; + tp->mdio_bus->read = &tg3_mdio_read; + tp->mdio_bus->write = &tg3_mdio_write; + tp->mdio_bus->reset = &tg3_mdio_reset; + tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); + tp->mdio_bus->irq = &tp->mdio_irq[0]; + + for (i = 0; i < PHY_MAX_ADDR; i++) + tp->mdio_bus->irq[i] = PHY_POLL; + + /* The bus registration will look for all the PHYs on the mdio bus. + * Unfortunately, it does not ensure the PHY is powered up before + * accessing the PHY ID registers. A chip reset is the + * quickest way to bring the device back to an operational state.. 
+ */ + if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) + tg3_bmcr_reset(tp); - tg3_writephy(tp, 0x16, 0x0802); - if (tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } + i = mdiobus_register(tp->mdio_bus); + if (i) { + printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n", + tp->dev->name, i); + mdiobus_free(tp->mdio_bus); + return i; + } - for (i = 0; i < 6; i += 2) { - u32 low, high; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || - tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } - low &= 0x7fff; - high &= 0x000f; - if (low != test_pat[chan][i] || - high != test_pat[chan][i+1]) { - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + if (!phydev || !phydev->drv) { + printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + return -ENODEV; + } - return -EBUSY; - } - } + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case TG3_PHY_ID_BCM57780: + phydev->interface = PHY_INTERFACE_MODE_GMII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + break; + case TG3_PHY_ID_BCM50610: + case TG3_PHY_ID_BCM50610M: + phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | + PHY_BRCM_RX_REFCLK_UNUSED | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_AUTO_PWRDWN_ENABLE; + if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) + phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; + if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; + if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; + /* fallthru */ + case TG3_PHY_ID_RTL8211C: + phydev->interface = PHY_INTERFACE_MODE_RGMII; + break; + case TG3_PHY_ID_RTL8201E: + case TG3_PHY_ID_BCMAC131: + phydev->interface = PHY_INTERFACE_MODE_MII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + break; } + tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + return 0; } -static int tg3_phy_reset_chanpat(struct tg3 *tp) +static void tg3_mdio_fini(struct tg3 *tp) { - int chan; - - for (chan = 0; chan < 4; chan++) { - int i; - - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0002); - for (i = 0; i < 6; i++) - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); - tg3_writephy(tp, 0x16, 0x0202); - if (tg3_wait_macro_done(tp)) - return -EBUSY; + if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { + tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); } - - return 0; } -static int tg3_phy_reset_5703_4_5(struct tg3 *tp) +/* tp->lock is held. */ +static inline void tg3_generate_fw_event(struct tg3 *tp) { - u32 reg32, phy9_orig; - int retries, do_phy_reset, err; + u32 val; - retries = 10; + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); + + tp->last_event_jiffies = jiffies; +} + +#define TG3_FW_EVENT_TIMEOUT_USEC 2500 + +/* tp->lock is held. */ +static void tg3_wait_for_event_ack(struct tg3 *tp) +{ + int i; + unsigned int delay_cnt; + long time_remain; + + /* If enough time has passed, no wait is necessary. 
*/ + time_remain = (long)(tp->last_event_jiffies + 1 + + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - + (long)jiffies; + if (time_remain < 0) + return; + + /* Check if we can shorten the wait time. */ + delay_cnt = jiffies_to_usecs(time_remain); + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; + delay_cnt = (delay_cnt >> 3) + 1; + + for (i = 0; i < delay_cnt; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + udelay(8); + } +} + +/* tp->lock is held. */ +static void tg3_ump_link_report(struct tg3 *tp) +{ + u32 reg; + u32 val; + + if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + return; + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); + + val = 0; + if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, &reg)) + val |= (reg & 0xffff); + } + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); + + if (!tg3_readphy(tp, MII_PHYADDR, &reg)) + val = reg << 16; + else + val = 0; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); + + tg3_generate_fw_event(tp); +} + +static void tg3_link_report(struct tg3 *tp) +{ + if (!netif_carrier_ok(tp->dev)) { + if (netif_msg_link(tp)) + printk(KERN_INFO PFX "%s: Link is down.\n", + tp->dev->name); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", + tp->dev->name, + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + printk(KERN_INFO PFX + "%s: Flow control is %s for TX and %s for RX.\n", + tp->dev->name, + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? 
+ "on" : "off"); + tg3_ump_link_report(tp); + } +} + +static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_1000XPSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & ADVERTISE_1000XPAUSE) { + if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_1000XPAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) +{ + u8 autoneg; + u8 flowctrl = 0; + u32 old_rx_mode = tp->rx_mode; + u32 old_tx_mode = tp->tx_mode; + + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) + autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + else + autoneg = tp->link_config.autoneg; + + if (autoneg == AUTONEG_ENABLE && + (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { + if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) + flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); + else + flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + } else + flowctrl = tp->link_config.flowctrl; + + tp->link_config.active_flowctrl = flowctrl; + + if (flowctrl & FLOW_CTRL_RX) + tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; + else + tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + + if (old_rx_mode != tp->rx_mode) + tw32_f(MAC_RX_MODE, tp->rx_mode); + + if (flowctrl & FLOW_CTRL_TX) + tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; + else + tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + + if (old_tx_mode != tp->tx_mode) + tw32_f(MAC_TX_MODE, tp->tx_mode); +} + +static void tg3_adjust_link(struct net_device *dev) +{ + u8 oldflowctrl, linkmesg = 0; + u32 mac_mode, lcl_adv, rmt_adv; + struct tg3 *tp = netdev_priv(dev); + struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + spin_lock_bh(&tp->lock); + + mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | + MAC_MODE_HALF_DUPLEX); + + oldflowctrl = tp->link_config.active_flowctrl; + + if (phydev->link) { + lcl_adv = 0; + rmt_adv = 0; + + if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else if (phydev->speed == SPEED_1000 || + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (phydev->duplex == DUPLEX_HALF) + mac_mode |= MAC_MODE_HALF_DUPLEX; + else { + lcl_adv = tg3_advert_flowctrl_1000T( + tp->link_config.flowctrl); + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + } + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + + if (mac_mode != tp->mac_mode) { + 
tp->mac_mode = mac_mode; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (phydev->speed == SPEED_10) + tw32(MAC_MI_STAT, + MAC_MI_STAT_10MBPS_MODE | + MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + else + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + } + + if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + else + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + + if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || + (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || + phydev->speed != tp->link_config.active_speed || + phydev->duplex != tp->link_config.active_duplex || + oldflowctrl != tp->link_config.active_flowctrl) + linkmesg = 1; + + tp->link_config.active_speed = phydev->speed; + tp->link_config.active_duplex = phydev->duplex; + + spin_unlock_bh(&tp->lock); + + if (linkmesg) + tg3_link_report(tp); +} + +static int tg3_phy_init(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) + return 0; + + /* Bring the PHY back to a known state. */ + tg3_bmcr_reset(tp); + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + /* Attach the MAC to the PHY. */ + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, + phydev->dev_flags, phydev->interface); + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name); + return PTR_ERR(phydev); + } + + /* Mask with MAC supported features. */ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { + phydev->supported &= (PHY_GBIT_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + } + /* fallthru */ + case PHY_INTERFACE_MODE_MII: + phydev->supported &= (PHY_BASIC_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + return -EINVAL; + } + + tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED; + + phydev->advertising = phydev->supported; + + return 0; +} + +static void tg3_phy_start(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (tp->link_config.phy_is_low_power) { + tp->link_config.phy_is_low_power = 0; + phydev->speed = tp->link_config.orig_speed; + phydev->duplex = tp->link_config.orig_duplex; + phydev->autoneg = tp->link_config.orig_autoneg; + phydev->advertising = tp->link_config.orig_advertising; + } + + phy_start(phydev); + + phy_start_aneg(phydev); +} + +static void tg3_phy_stop(struct tg3 *tp) +{ + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return; + + phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +} + +static void tg3_phy_fini(struct tg3 *tp) +{ + if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; + } +} + +static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +{ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); +} + +static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + 
tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } +} + +static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 reg; + + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + return; + + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + tg3_phy_fet_toggle_apd(tp, enable); + return; + } + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_SCR5_SEL | + MII_TG3_MISC_SHDW_SCR5_LPED | + MII_TG3_MISC_SHDW_SCR5_DLPTLM | + MII_TG3_MISC_SHDW_SCR5_SDTL | + MII_TG3_MISC_SHDW_SCR5_C125OE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) + reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_APD_SEL | + MII_TG3_MISC_SHDW_APD_WKTM_84MS; + if (enable) + reg |= MII_TG3_MISC_SHDW_APD_ENABLE; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); +} + +static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) +{ + u32 phy; + + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || + (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + return; + + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); + } + } else { + phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | + MII_TG3_AUXCTL_SHDWSEL_MISC; + if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && + !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + phy |= MII_TG3_AUXCTL_MISC_WREN; + tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + } + } +} + +static void tg3_phy_set_wirespeed(struct tg3 *tp) +{ + u32 val; + + if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) + return; + + if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && + !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) + tg3_writephy(tp, MII_TG3_AUX_CTRL, + (val | (1 << 15) | (1 << 4))); +} + +static void tg3_phy_apply_otp(struct tg3 *tp) +{ + u32 otp, phy; + + if (!tp->phy_otp) + return; + + otp = tp->phy_otp; + + /* Enable SM_DSP clock and tx 6dB coding. 
*/ + phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); + phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + + phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | + ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); + + phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); + phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + + phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + + phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); + + phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + + /* Turn off SM_DSP clock. */ + phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); +} + +static int tg3_wait_macro_done(struct tg3 *tp) +{ + int limit = 100; + + while (limit--) { + u32 tmp32; + + if (!tg3_readphy(tp, 0x16, &tmp32)) { + if ((tmp32 & 0x1000) == 0) + break; + } + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) +{ + static const u32 test_pat[4][6] = { + { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, + { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, + { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, + { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } + }; + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, 0x16, 0x0002); + + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, + test_pat[chan][i]); + + tg3_writephy(tp, 0x16, 0x0202); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, 0x16, 0x0082); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, 0x16, 0x0802); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + for (i = 0; i < 6; i += 2) { + u32 low, high; + + if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || + tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + low &= 0x7fff; + high &= 0x000f; + if (low != test_pat[chan][i] || + high != test_pat[chan][i+1]) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + + return -EBUSY; + } + } + } + + return 0; +} + +static int tg3_phy_reset_chanpat(struct tg3 *tp) +{ + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, 0x16, 0x0002); + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); + tg3_writephy(tp, 0x16, 0x0202); + if (tg3_wait_macro_done(tp)) + return -EBUSY; + } + + return 0; +} + +static int tg3_phy_reset_5703_4_5(struct tg3 *tp) +{ + u32 reg32, phy9_orig; + int retries, do_phy_reset, err; + + retries = 10; do_phy_reset = 1; do 
{ if (do_phy_reset) { @@ -1115,8 +1865,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) return err; } -static void tg3_link_report(struct tg3 *); - /* This will reset the tigon3 PHY if there is no valid * link unless the FORCE argument is non-zero. */ @@ -1174,7 +1922,8 @@ static int tg3_phy_reset(struct tg3 *tp) tw32(TG3_CPMU_CTRL, cpmuctrl); } - if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) { + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { u32 val; val = tr32(TG3_CPMU_LSPD_1000MB_CLK); @@ -1184,16 +1933,15 @@ static int tg3_phy_reset(struct tg3 *tp) udelay(40); tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); } - - /* Disable GPHY autopowerdown. */ - tg3_writephy(tp, MII_TG3_MISC_SHDW, - MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_APD_SEL | - MII_TG3_MISC_SHDW_APD_WKTM_84MS); } tg3_phy_apply_otp(tp); + if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + else + tg3_phy_toggle_apd(tp, false); + out: if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); @@ -1233,7 +1981,7 @@ out: if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { /* Cannot do read-modify-write on 5401 */ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); - } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { + } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { u32 phy_reg; /* Set bit 14 with read-modify-write to preserve other bits */ @@ -1245,7 +1993,7 @@ out: /* Set phy register 0x10 bit 0 to high fifo elasticity to support * jumbo frames transmission. */ - if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { u32 phy_reg; if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) @@ -1255,7 +2003,7 @@ out: if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { /* adjust output voltage */ - tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); } tg3_phy_toggle_automdix(tp, 1); @@ -1270,8 +2018,9 @@ static void tg3_frob_aux_power(struct tg3 *tp) if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) return; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { struct net_device *dev_peer; dev_peer = pci_get_drvdata(tp->pdev_peer); @@ -1295,6 +2044,22 @@ static void tg3_frob_aux_power(struct tg3 *tp) GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. 
*/ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); } else { u32 no_gpio2; u32 grc_local_ctrl = 0; @@ -1380,10 +2145,8 @@ static int tg3_setup_phy(struct tg3 *, int); static void tg3_write_sig_post_reset(struct tg3 *, int); static int tg3_halt_cpu(struct tg3 *, u32); -static int tg3_nvram_lock(struct tg3 *); -static void tg3_nvram_unlock(struct tg3 *); -static void tg3_power_down_phy(struct tg3 *tp) +static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -1406,10 +2169,35 @@ static void tg3_power_down_phy(struct tg3 *tp) tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); udelay(40); return; - } else { + } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + u32 phytest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_ADVERTISE, 0); + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { + phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; + tg3_writephy(tp, + MII_TG3_FET_SHDW_AUXMODE4, + phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + return; + } else if (do_low_power) { tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); + + tg3_writephy(tp, MII_TG3_AUX_CTRL, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V); } /* The PHY should not be powered down on some chips because @@ -1421,21 +2209,253 @@ static void tg3_power_down_phy(struct tg3 *tp) (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) return; - if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) { - val = tr32(TG3_CPMU_LSPD_1000MB_CLK); - val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; - val |= CPMU_LSPD_1000MB_MACCLK_12_5; - tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + val |= CPMU_LSPD_1000MB_MACCLK_12_5; + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +/* tp->lock is held. */ +static int tg3_nvram_lock(struct tg3 *tp) +{ + if (tp->tg3_flags & TG3_FLAG_NVRAM) { + int i; + + if (tp->nvram_lock_cnt == 0) { + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); + } + if (i == 8000) { + tw32(NVRAM_SWARB, SWARB_REQ_CLR1); + return -ENODEV; + } + } + tp->nvram_lock_cnt++; + } + return 0; +} + +/* tp->lock is held. */ +static void tg3_nvram_unlock(struct tg3 *tp) +{ + if (tp->tg3_flags & TG3_FLAG_NVRAM) { + if (tp->nvram_lock_cnt > 0) + tp->nvram_lock_cnt--; + if (tp->nvram_lock_cnt == 0) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); + } +} + +/* tp->lock is held. 
*/ +static void tg3_enable_nvram_access(struct tg3 *tp) +{ + if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && + !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); + } +} + +/* tp->lock is held. */ +static void tg3_disable_nvram_access(struct tg3 *tp) +{ + if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && + !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); + } +} + +static int tg3_nvram_read_using_eeprom(struct tg3 *tp, + u32 offset, u32 *val) +{ + u32 tmp; + int i; + + if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) + return -EINVAL; + + tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | + EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, + tmp | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + ((offset << EEPROM_ADDR_ADDR_SHIFT) & + EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_READ | EEPROM_ADDR_START); + + for (i = 0; i < 1000; i++) { + tmp = tr32(GRC_EEPROM_ADDR); + + if (tmp & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(tmp & EEPROM_ADDR_COMPLETE)) + return -EBUSY; + + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + + return 0; +} + +#define NVRAM_CMD_TIMEOUT 10000 + +static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) +{ + int i; + + tw32(NVRAM_CMD, nvram_cmd); + for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { + udelay(10); + if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { + udelay(10); + break; + } + } + + if (i == NVRAM_CMD_TIMEOUT) + return -EBUSY; + + return 0; +} + +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) +{ + if ((tp->tg3_flags & TG3_FLAG_NVRAM) && + (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && + (tp->tg3_flags2 & TG3_FLG2_FLASH) && + !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; +} + +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) +{ + if ((tp->tg3_flags & TG3_FLAG_NVRAM) && + (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && + (tp->tg3_flags2 & TG3_FLG2_FLASH) && + !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; +} + +/* NOTE: Data read in from NVRAM is byteswapped according to + * the byteswapping settings for all other register accesses. + * tg3 devices are BE devices, so on a BE machine, the data + * returned will be exactly as it is seen in NVRAM. On a LE + * machine, the 32-bit value will be byteswapped. + */ +static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) +{ + int ret; + + if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) + return tg3_nvram_read_using_eeprom(tp, offset, val); + + offset = tg3_nvram_phys_addr(tp, offset); + + if (offset > NVRAM_ADDR_MSK) + return -EINVAL; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + + tw32(NVRAM_ADDR, offset); + ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); + + if (ret == 0) + *val = tr32(NVRAM_RDDATA); + + tg3_disable_nvram_access(tp); + + tg3_nvram_unlock(tp); + + return ret; +} + +/* Ensures NVRAM data is in bytestream format. 
*/ +static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) +{ + u32 v; + int res = tg3_nvram_read(tp, offset, &v); + if (!res) + *val = cpu_to_be32(v); + return res; +} + +/* tp->lock is held. */ +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) +{ + u32 addr_high, addr_low; + int i; + + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } } - tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); } static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) { u32 misc_host_ctrl; - u16 power_control, power_caps; - int pm = tp->pm_cap; + bool device_should_wake, do_low_power; /* Make sure register accesses (indirect or otherwise) * will function correctly. @@ -1444,18 +2464,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); - pci_read_config_word(tp->pdev, - pm + PCI_PM_CTRL, - &power_control); - power_control |= PCI_PM_CTRL_PME_STATUS; - power_control &= ~(PCI_PM_CTRL_STATE_MASK); switch (state) { case PCI_D0: - power_control |= 0; - pci_write_config_word(tp->pdev, - pm + PCI_PM_CTRL, - power_control); - udelay(100); /* Delay after power state change */ + pci_enable_wake(tp->pdev, state, false); + pci_set_power_state(tp->pdev, PCI_D0); /* Switch out of Vaux if it is a NIC */ if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) @@ -1464,42 +2476,98 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) return 0; case PCI_D1: - power_control |= 1; - break; - case PCI_D2: - power_control |= 2; - break; - case PCI_D3hot: - power_control |= 3; break; default: - printk(KERN_WARNING PFX "%s: Invalid power state (%d) " - "requested.\n", - tp->dev->name, state); + printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n", + tp->dev->name, state); return -EINVAL; - }; + } + + /* Restore the CLKREQ setting. 
*/ + if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { + u16 lnkctl; - power_control |= PCI_PM_CTRL_PME_ENABLE; + pci_read_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_LNKCTL, + lnkctl); + } misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); tw32(TG3PCI_MISC_HOST_CTRL, misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); - if (tp->link_config.phy_is_low_power == 0) { - tp->link_config.phy_is_low_power = 1; - tp->link_config.orig_speed = tp->link_config.speed; - tp->link_config.orig_duplex = tp->link_config.duplex; - tp->link_config.orig_autoneg = tp->link_config.autoneg; - } + device_should_wake = pci_pme_capable(tp->pdev, state) && + device_may_wakeup(&tp->pdev->dev) && + (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); + + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + do_low_power = false; + if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && + !tp->link_config.phy_is_low_power) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + tp->link_config.phy_is_low_power = 1; + + tp->link_config.orig_speed = phydev->speed; + tp->link_config.orig_duplex = phydev->duplex; + tp->link_config.orig_autoneg = phydev->autoneg; + tp->link_config.orig_advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || + device_should_wake) { + if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; + } - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { - tp->link_config.speed = SPEED_10; - tp->link_config.duplex = DUPLEX_HALF; - tp->link_config.autoneg = AUTONEG_ENABLE; - tg3_setup_phy(tp, 0); + phydev->advertising = advertising; + + phy_start_aneg(phydev); + + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != TG3_PHY_ID_BCMAC131) { + phyid &= TG3_PHY_OUI_MASK; + if (phyid == TG3_PHY_OUI_1 || + phyid == TG3_PHY_OUI_2 || + phyid == TG3_PHY_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; + + if (tp->link_config.phy_is_low_power == 0) { + tp->link_config.phy_is_low_power = 1; + tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + } + + if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { + tp->link_config.speed = SPEED_10; + tp->link_config.duplex = DUPLEX_HALF; + tp->link_config.autoneg = AUTONEG_ENABLE; + tg3_setup_phy(tp, 0); + } } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { @@ -1524,14 +2592,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) WOL_DRV_WOL | WOL_SET_MAGIC_PKT); - pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps); - - if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { + if (device_should_wake) { u32 mac_mode; if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); - udelay(40); + if (do_low_power) { + tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); + udelay(40); + } if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) mac_mode = MAC_MODE_PORT_MODE_GMII; @@ -1553,313 +2621,118 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) mac_mode = MAC_MODE_PORT_MODE_TBI; } - if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) - tw32(MAC_LED_CTRL, tp->led_ctrl); - - if (((power_caps & 
PCI_PM_CAP_PME_D3cold) && - (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) - mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; - - tw32_f(MAC_MODE, mac_mode); - udelay(100); - - tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); - udelay(10); - } - - if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { - u32 base_val; - - base_val = tp->pci_clock_ctrl; - base_val |= (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE); - - tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | - CLOCK_CTRL_PWRDOWN_PLL133, 40); - } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { - /* do nothing */ - } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { - u32 newbits1, newbits2; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE | - CLOCK_CTRL_ALTCLK); - newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { - newbits1 = CLOCK_CTRL_625_CORE; - newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; - } else { - newbits1 = CLOCK_CTRL_ALTCLK; - newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } - - tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, - 40); - - tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, - 40); - - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - u32 newbits3; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE | - CLOCK_CTRL_44MHZ_CORE); - } else { - newbits3 = CLOCK_CTRL_44MHZ_CORE; - } - - tw32_wait_f(TG3PCI_CLOCK_CTRL, - tp->pci_clock_ctrl | newbits3, 40); - } - } - - if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) - tg3_power_down_phy(tp); - - tg3_frob_aux_power(tp); - - /* Workaround for unstable PLL clock */ - if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || - (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { - u32 val = tr32(0x7d00); - - val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); - tw32(0x7d00, val); - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { - int err; - - err = tg3_nvram_lock(tp); - tg3_halt_cpu(tp, RX_CPU_BASE); - if (!err) - tg3_nvram_unlock(tp); - } - } - - tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); - - /* Finally, set the new power state. */ - pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control); - udelay(100); /* Delay after power state change */ - - return 0; -} - -/* tp->lock is held. */ -static void tg3_wait_for_event_ack(struct tg3 *tp) -{ - int i; - - /* Wait for up to 2.5 milliseconds */ - for (i = 0; i < 250000; i++) { - if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) - break; - udelay(10); - } -} - -/* tp->lock is held. 
*/ -static void tg3_ump_link_report(struct tg3 *tp) -{ - u32 reg; - u32 val; - - if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) - return; - - tg3_wait_for_event_ack(tp); - - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); - - tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); - - val = 0; - if (!tg3_readphy(tp, MII_BMCR, &reg)) - val = reg << 16; - if (!tg3_readphy(tp, MII_BMSR, &reg)) - val |= (reg & 0xffff); - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); - - val = 0; - if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) - val = reg << 16; - if (!tg3_readphy(tp, MII_LPA, &reg)) - val |= (reg & 0xffff); - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); - - val = 0; - if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { - if (!tg3_readphy(tp, MII_CTRL1000, &reg)) - val = reg << 16; - if (!tg3_readphy(tp, MII_STAT1000, &reg)) - val |= (reg & 0xffff); - } - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); - - if (!tg3_readphy(tp, MII_PHYADDR, &reg)) - val = reg << 16; - else - val = 0; - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); - - val = tr32(GRC_RX_CPU_EVENT); - val |= GRC_RX_CPU_DRIVER_EVENT; - tw32_f(GRC_RX_CPU_EVENT, val); -} - -static void tg3_link_report(struct tg3 *tp) -{ - if (!netif_carrier_ok(tp->dev)) { - if (netif_msg_link(tp)) - printk(KERN_INFO PFX "%s: Link is down.\n", - tp->dev->name); - tg3_ump_link_report(tp); - } else if (netif_msg_link(tp)) { - printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", - tp->dev->name, - (tp->link_config.active_speed == SPEED_1000 ? - 1000 : - (tp->link_config.active_speed == SPEED_100 ? - 100 : 10)), - (tp->link_config.active_duplex == DUPLEX_FULL ? - "full" : "half")); - - printk(KERN_INFO PFX - "%s: Flow control is %s for TX and %s for RX.\n", - tp->dev->name, - (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ? - "on" : "off", - (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ? 
- "on" : "off"); - tg3_ump_link_report(tp); - } -} - -static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) -{ - u16 miireg; - - if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX)) - miireg = ADVERTISE_PAUSE_CAP; - else if (flow_ctrl & TG3_FLOW_CTRL_TX) - miireg = ADVERTISE_PAUSE_ASYM; - else if (flow_ctrl & TG3_FLOW_CTRL_RX) - miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - else - miireg = 0; - - return miireg; -} - -static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) -{ - u16 miireg; - - if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX)) - miireg = ADVERTISE_1000XPAUSE; - else if (flow_ctrl & TG3_FLOW_CTRL_TX) - miireg = ADVERTISE_1000XPSE_ASYM; - else if (flow_ctrl & TG3_FLOW_CTRL_RX) - miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; - else - miireg = 0; - - return miireg; -} + if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); -static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv) -{ - u8 cap = 0; + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || + (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; - if (lcladv & ADVERTISE_PAUSE_CAP) { - if (lcladv & ADVERTISE_PAUSE_ASYM) { - if (rmtadv & LPA_PAUSE_CAP) - cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; - else if (rmtadv & LPA_PAUSE_ASYM) - cap = TG3_FLOW_CTRL_RX; - } else { - if (rmtadv & LPA_PAUSE_CAP) - cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + mac_mode |= tp->mac_mode & + (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); + if (mac_mode & MAC_MODE_APE_TX_EN) + mac_mode |= MAC_MODE_TDE_ENABLE; } - } else if (lcladv & ADVERTISE_PAUSE_ASYM) { - if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM)) - cap = TG3_FLOW_CTRL_TX; + + tw32_f(MAC_MODE, mac_mode); + udelay(100); + + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); } - return cap; -} + if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 base_val; -static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) -{ - u8 cap = 0; + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); - if (lcladv & ADVERTISE_1000XPAUSE) { - if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if (rmtadv & LPA_1000XPAUSE) - cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; - else if (rmtadv & LPA_1000XPAUSE_ASYM) - cap = TG3_FLOW_CTRL_RX; + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || + (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { + /* do nothing */ + } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && + (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { + u32 newbits1, newbits2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; } else { - if (rmtadv & LPA_1000XPAUSE) - cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; } 
- } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) - cap = TG3_FLOW_CTRL_TX; - } - return cap; -} + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); -static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv) -{ - u8 new_tg3_flags = 0; - u32 old_rx_mode = tp->rx_mode; - u32 old_tx_mode = tp->tx_mode; + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); - if (tp->link_config.autoneg == AUTONEG_ENABLE && - (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) - new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv, - remote_adv); - else - new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv, - remote_adv); - } else { - new_tg3_flags = tp->link_config.flowctrl; + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + u32 newbits3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } } - tp->link_config.active_flowctrl = new_tg3_flags; + if (!(device_should_wake) && + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + tg3_power_down_phy(tp, do_low_power); - if (new_tg3_flags & TG3_FLOW_CTRL_RX) - tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; - else - tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + tg3_frob_aux_power(tp); - if (old_rx_mode != tp->rx_mode) { - tw32_f(MAC_RX_MODE, tp->rx_mode); - } + /* Workaround for unstable PLL clock */ + if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || + (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + u32 val = tr32(0x7d00); - if (new_tg3_flags & TG3_FLOW_CTRL_TX) - tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; - else - tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { + int err; - if (old_tx_mode != tp->tx_mode) { - tw32_f(MAC_TX_MODE, tp->tx_mode); + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + } } + + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + + if (device_should_wake) + pci_enable_wake(tp->pdev, state, true); + + /* Finally, set the new power state. */ + pci_set_power_state(tp->pdev, state); + + return 0; } static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) @@ -1896,7 +2769,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 break; default: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : SPEED_10; *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : @@ -1906,7 +2779,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *speed = SPEED_INVALID; *duplex = DUPLEX_INVALID; break; - }; + } } static void tg3_phy_copper_begin(struct tg3 *tp) @@ -2018,7 +2891,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp) case SPEED_1000: bmcr |= TG3_BMCR_SPEED1000; break; - }; + } if (tp->link_config.duplex == DUPLEX_FULL) bmcr |= BMCR_FULLDPLX; @@ -2231,7 +3104,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) + else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) tg3_writephy(tp, MII_TG3_IMASK, ~0); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -2341,7 +3214,9 @@ relink: tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - } else + } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; @@ -2392,6 +3267,24 @@ relink: NIC_SRAM_FIRMWARE_MBOX_MAGIC2); } + /* Prevent send BD corruption. */ + if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { + u16 oldlnkctl, newlnkctl; + + pci_read_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_LNKCTL, + &oldlnkctl); + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + else + newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; + if (newlnkctl != oldlnkctl) + pci_write_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_LNKCTL, + newlnkctl); + } + if (current_link_up != netif_carrier_ok(tp->dev)) { if (current_link_up) netif_carrier_on(tp->dev); @@ -2716,7 +3609,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, default: ret = ANEG_FAILED; break; - }; + } return ret; } @@ -3071,9 +3964,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) else current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); - tp->hw_status->status = + tp->napi[0].hw_status->status = (SD_STATUS_UPDATED | - (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); for (i = 0; i < 100; i++) { tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | @@ -3168,8 +4061,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) err |= tg3_readphy(tp, MII_BMCR, &bmcr); if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && - tp->link_config.flowctrl == tp->link_config.active_flowctrl) { + (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { u32 adv, new_adv; @@ -3369,8 +4261,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) err = tg3_setup_copper_phy(tp, force_reset); } - if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) { + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { u32 val, scale; val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; @@ -3441,24 +4332,32 @@ static void tg3_tx_recover(struct tg3 *tp) spin_unlock(&tp->lock); } -static inline u32 tg3_tx_avail(struct tg3 *tp) +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) { smp_mb(); - return (tp->tx_pending - - ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); + return tnapi->tx_pending - + 
((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); } /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. */ -static void tg3_tx(struct tg3 *tp) +static void tg3_tx(struct tg3_napi *tnapi) { - u32 hw_idx = tp->hw_status->idx[0].tx_consumer; - u32 sw_idx = tp->tx_cons; + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; + struct netdev_queue *txq; + int index = tnapi - tp->napi; + + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + index--; + + txq = netdev_get_tx_queue(tp->dev, index); while (sw_idx != hw_idx) { - struct tx_ring_info *ri = &tp->tx_buffers[sw_idx]; + struct ring_info *ri = &tnapi->tx_buffers[sw_idx]; struct sk_buff *skb = ri->skb; int i, tx_bug = 0; @@ -3477,7 +4376,7 @@ static void tg3_tx(struct tg3 *tp) sw_idx = NEXT_TX(sw_idx); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - ri = &tp->tx_buffers[sw_idx]; + ri = &tnapi->tx_buffers[sw_idx]; if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) tx_bug = 1; @@ -3485,7 +4384,6 @@ static void tg3_tx(struct tg3 *tp) pci_unmap_addr(ri, mapping), skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE); - sw_idx = NEXT_TX(sw_idx); } @@ -3497,7 +4395,7 @@ static void tg3_tx(struct tg3 *tp) } } - tp->tx_cons = sw_idx; + tnapi->tx_cons = sw_idx; /* Need to make the tx_cons update visible to tg3_start_xmit() * before checking for netif_queue_stopped(). Without the @@ -3506,16 +4404,27 @@ static void tg3_tx(struct tg3 *tp) */ smp_mb(); - if (unlikely(netif_queue_stopped(tp->dev) && - (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) { - netif_tx_lock(tp->dev); - if (netif_queue_stopped(tp->dev) && - (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))) - netif_wake_queue(tp->dev); - netif_tx_unlock(tp->dev); + if (unlikely(netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); } } +static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) +{ + if (!ri->skb) + return; + + pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), + map_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(ri->skb); + ri->skb = NULL; +} + /* Returns size of skb allocated or < 0 on error. * * We only need to fill in the address because the other members @@ -3527,8 +4436,8 @@ static void tg3_tx(struct tg3 *tp) * buffers the cpu only reads the last cacheline of the RX descriptor * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
*/ -static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, - int src_idx, u32 dest_idx_unmasked) +static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *desc; struct ring_info *map, *src_map; @@ -3540,25 +4449,21 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; - desc = &tp->rx_std[dest_idx]; - map = &tp->rx_std_buffers[dest_idx]; - if (src_idx >= 0) - src_map = &tp->rx_std_buffers[src_idx]; - skb_size = tp->rx_pkt_buf_sz; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + skb_size = tp->rx_pkt_map_sz; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; - desc = &tp->rx_jumbo[dest_idx]; - map = &tp->rx_jumbo_buffers[dest_idx]; - if (src_idx >= 0) - src_map = &tp->rx_jumbo_buffers[src_idx]; - skb_size = RX_JUMBO_PKT_BUF_SZ; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + skb_size = TG3_RX_JMB_MAP_SZ; break; default: return -EINVAL; - }; + } /* Do not overwrite any of the map or rp information * until we are sure we can commit to a new buffer. @@ -3566,22 +4471,22 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. */ - skb = netdev_alloc_skb(tp->dev, skb_size); + skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); if (skb == NULL) return -ENOMEM; skb_reserve(skb, tp->rx_offset); - mapping = pci_map_single(tp->pdev, skb->data, - skb_size - tp->rx_offset, + mapping = pci_map_single(tp->pdev, skb->data, skb_size, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + return -EIO; + } map->skb = skb; pci_unmap_addr_set(map, mapping, mapping); - if (src_map != NULL) - src_map->skb = NULL; - desc->addr_hi = ((u64)mapping >> 32); desc->addr_lo = ((u64)mapping & 0xffffffff); @@ -3592,50 +4497,46 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * members of the RX descriptor are invariant. See notes above * tg3_alloc_rx_skb for full details. 
*/ -static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, - int src_idx, u32 dest_idx_unmasked) +static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) { + struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; int dest_idx; + struct tg3_rx_prodring_set *spr = &tp->prodring[0]; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; - dest_desc = &tp->rx_std[dest_idx]; - dest_map = &tp->rx_std_buffers[dest_idx]; - src_desc = &tp->rx_std[src_idx]; - src_map = &tp->rx_std_buffers[src_idx]; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; - dest_desc = &tp->rx_jumbo[dest_idx]; - dest_map = &tp->rx_jumbo_buffers[dest_idx]; - src_desc = &tp->rx_jumbo[src_idx]; - src_map = &tp->rx_jumbo_buffers[src_idx]; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; break; default: return; - }; + } dest_map->skb = src_map->skb; pci_unmap_addr_set(dest_map, mapping, pci_unmap_addr(src_map, mapping)); dest_desc->addr_hi = src_desc->addr_hi; dest_desc->addr_lo = src_desc->addr_lo; - src_map->skb = NULL; } -#if TG3_VLAN_TAG_USED -static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) -{ - return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag); -} -#endif - /* The RX ring scheme is composed of multiple rings which post fresh * buffers to the chip, and one special ring the chip uses to report * status back to the host. @@ -3660,14 +4561,17 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) * If both the host and chip were to write into the same ring, cache line * eviction could occur since both entities want it in an exclusive state. */ -static int tg3_rx(struct tg3 *tp, int budget) +static int tg3_rx(struct tg3_napi *tnapi, int budget) { + struct tg3 *tp = tnapi->tp; u32 work_mask, rx_std_posted = 0; - u32 sw_idx = tp->rx_rcb_ptr; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; u16 hw_idx; int received; + struct tg3_rx_prodring_set *tpr = tnapi->prodring; - hw_idx = tp->hw_status->idx[0].rx_producer; + hw_idx = *(tnapi->rx_rcb_prod_idx); /* * We need to order the read of hw_idx and the read of * the opaque cookie. 
@@ -3675,8 +4579,11 @@ static int tg3_rx(struct tg3 *tp, int budget) rmb(); work_mask = 0; received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; while (sw_idx != hw_idx && budget > 0) { - struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx]; + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; unsigned int len; struct sk_buff *skb; dma_addr_t dma_addr; @@ -3685,27 +4592,25 @@ static int tg3_rx(struct tg3 *tp, int budget) desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { - dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], - mapping); - skb = tp->rx_std_buffers[desc_idx].skb; - post_ptr = &tp->rx_std_ptr; + ri = &tp->prodring[0].rx_std_buffers[desc_idx]; + dma_addr = pci_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &std_prod_idx; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], - mapping); - skb = tp->rx_jumbo_buffers[desc_idx].skb; - post_ptr = &tp->rx_jumbo_ptr; - } - else { + ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; + dma_addr = pci_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &jmb_prod_idx; + } else goto next_pkt_nopost; - } work_mask |= opaque_key; if ((desc->err_vlan & RXD_ERR_MASK) != 0 && (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { drop_it: - tg3_recycle_rx(tp, opaque_key, + tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. */ @@ -3713,36 +4618,40 @@ static int tg3_rx(struct tg3 *tp, int budget) goto next_pkt; } - len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; - if (len > RX_COPY_THRESHOLD - && tp->rx_offset == 2 - /* rx_offset != 2 iff this is a 5701 card running - * in PCI-X mode [see tg3_get_invariants()] */ - ) { + if (len > RX_COPY_THRESHOLD && + tp->rx_offset == NET_IP_ALIGN) { + /* rx_offset will likely not equal NET_IP_ALIGN + * if this is a 5701 card running in PCI-X mode + * [see tg3_get_invariants()] + */ int skb_size; - skb_size = tg3_alloc_rx_skb(tp, opaque_key, - desc_idx, *post_ptr); + skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, + *post_ptr); if (skb_size < 0) goto drop_it; - pci_unmap_single(tp->pdev, dma_addr, - skb_size - tp->rx_offset, + ri->skb = NULL; + + pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); skb_put(skb, len); } else { struct sk_buff *copy_skb; - tg3_recycle_rx(tp, opaque_key, + tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); - copy_skb = netdev_alloc_skb(tp->dev, len + 2); + copy_skb = netdev_alloc_skb(tp->dev, + len + TG3_RAW_IP_ALIGN); if (copy_skb == NULL) goto drop_it_no_recycle; - skb_reserve(copy_skb, 2); + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); @@ -3761,16 +4670,22 @@ static int tg3_rx(struct tg3 *tp, int budget) skb->ip_summed = CHECKSUM_NONE; skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q)) { + dev_kfree_skb(skb); + goto next_pkt; + } + #if TG3_VLAN_TAG_USED if (tp->vlgrp != NULL && desc->type_flags & RXD_FLAG_VLAN) { - tg3_vlan_rx(tp, skb, - desc->err_vlan & RXD_VLAN_MASK); + vlan_gro_receive(&tnapi->napi, tp->vlgrp, + 
desc->err_vlan & RXD_VLAN_MASK, skb); } else #endif - netif_receive_skb(skb); + napi_gro_receive(&tnapi->napi, skb); - tp->dev->last_rx = jiffies; received++; budget--; @@ -3779,9 +4694,7 @@ next_pkt: if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { u32 idx = *post_ptr % TG3_RX_RING_SIZE; - - tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + - TG3_64BIT_REG_LOW, idx); + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx); work_mask &= ~RXD_OPAQUE_RING_STD; rx_std_posted = 0; } @@ -3791,51 +4704,164 @@ next_pkt_nopost: /* Refresh hw_idx to see if there is new work */ if (sw_idx == hw_idx) { - hw_idx = tp->hw_status->idx[0].rx_producer; + hw_idx = *(tnapi->rx_rcb_prod_idx); rmb(); } } /* ACK the status ring. */ - tp->rx_rcb_ptr = sw_idx; - tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx); + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); /* Refill RX ring(s). */ - if (work_mask & RXD_OPAQUE_RING_STD) { - sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE; - tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, - sw_idx); - } - if (work_mask & RXD_OPAQUE_RING_JUMBO) { - sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE; - tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, - sw_idx); + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) { + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx % + TG3_RX_JUMBO_RING_SIZE; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. + */ + smp_wmb(); + + tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; + tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; + + napi_schedule(&tp->napi[1].napi); } - mmiowb(); return received; } -static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) +static void tg3_poll_link(struct tg3 *tp) { - struct tg3_hw_status *sblk = tp->hw_status; - /* handle link change and other phy events */ if (!(tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; + if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | - (sblk->status & ~SD_STATUS_LINK_CHG); + (sblk->status & ~SD_STATUS_LINK_CHG); spin_lock(&tp->lock); - tg3_setup_phy(tp, 0); + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, 0); spin_unlock(&tp->lock); } } +} + +static void tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) +{ + u32 si, di, cpycnt, src_prod_idx; + int i; + + while (1) { + src_prod_idx = spr->rx_std_prod_idx; + + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. 
+ */ + smp_rmb(); + + if (spr->rx_std_cons_idx == src_prod_idx) + break; + + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; + + cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); + + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; + + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % + TG3_RX_RING_SIZE; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % + TG3_RX_RING_SIZE; + } + + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; + + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % + TG3_RX_JUMBO_RING_SIZE; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % + TG3_RX_JUMBO_RING_SIZE; + } +} + +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) +{ + struct tg3 *tp = tnapi->tp; /* run TX completion thread */ - if (sblk->idx[0].tx_consumer != tp->tx_cons) { - tg3_tx(tp); + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) return work_done; } @@ -3844,20 +4870,91 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) * All RX "locking" is done by ensuring outside * code synchronizes with tg3->napi.poll() */ - if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) - work_done += tg3_rx(tp, budget - work_done); + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); + + if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { + int i; + u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx; + u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx; + + for (i = 2; i < tp->irq_cnt; i++) + tg3_rx_prodring_xfer(tp, tnapi->prodring, + tp->napi[i].prodring); + + wmb(); + + if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) { + u32 mbox = TG3_RX_STD_PROD_IDX_REG; + tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx); + } + + if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) { + u32 mbox = TG3_RX_JMB_PROD_IDX_REG; + tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx); + } + + mmiowb(); + } + + return work_done; +} + +static int tg3_poll_msix(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + work_done = 
tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + /* tp->last_tag is used in tg3_restart_ints() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + + /* check for RX/TX work to do */ + if (sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { + napi_complete(napi); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + break; + } + } return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; } static int tg3_poll(struct napi_struct *napi, int budget) { - struct tg3 *tp = container_of(napi, struct tg3, napi); + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; int work_done = 0; - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_hw_status *sblk = tnapi->hw_status; while (1) { - work_done = tg3_poll_work(tp, work_done, budget); + tg3_poll_link(tp); + + work_done = tg3_poll_work(tnapi, work_done, budget); if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) goto tx_recovery; @@ -3866,18 +4963,19 @@ static int tg3_poll(struct napi_struct *napi, int budget) break; if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { - /* tp->last_tag is used in tg3_restart_ints() below + /* tp->last_tag is used in tg3_int_reenable() below * to tell the hw how much work has been processed, * so we must read it before checking for more work. */ - tp->last_tag = sblk->status_tag; + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; rmb(); } else sblk->status &= ~SD_STATUS_UPDATED; - if (likely(!tg3_has_work(tp))) { - netif_rx_complete(tp->dev, napi); - tg3_restart_ints(tp); + if (likely(!tg3_has_work(tnapi))) { + napi_complete(napi); + tg3_int_reenable(tnapi); break; } } @@ -3886,19 +4984,22 @@ static int tg3_poll(struct napi_struct *napi, int budget) tx_recovery: /* work_done is guaranteed to be less than budget. 
*/ - netif_rx_complete(tp->dev, napi); + napi_complete(napi); schedule_work(&tp->reset_task); return work_done; } static void tg3_irq_quiesce(struct tg3 *tp) { + int i; + BUG_ON(tp->irq_sync); tp->irq_sync = 1; smp_mb(); - synchronize_irq(tp->pdev->irq); + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); } static inline int tg3_irq_sync(struct tg3 *tp) @@ -3928,14 +5029,15 @@ static inline void tg3_full_unlock(struct tg3 *tp) */ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; - prefetch(tp->hw_status); - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(dev, &tp->napi); + napi_schedule(&tnapi->napi); return IRQ_HANDLED; } @@ -3946,11 +5048,12 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) */ static irqreturn_t tg3_msi(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; - prefetch(tp->hw_status); - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); /* * Writing any value to intr-mbox-0 clears PCI INTA# and * chip-internal interrupt pending events. @@ -3960,16 +5063,16 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(dev, &tp->napi); + napi_schedule(&tnapi->napi); return IRQ_RETVAL(1); } static irqreturn_t tg3_interrupt(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int handled = 1; /* In INTx mode, it is possible for the interrupt to arrive at @@ -4000,9 +5103,9 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) if (tg3_irq_sync(tp)) goto out; sblk->status &= ~SD_STATUS_UPDATED; - if (likely(tg3_has_work(tp))) { - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); - netif_rx_schedule(dev, &tp->napi); + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); } else { /* No work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write @@ -4016,9 +5119,9 @@ out: static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int handled = 1; /* In INTx mode, it is possible for the interrupt to arrive at @@ -4026,7 +5129,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * Reading the PCI State register will confirm whether the * interrupt is ours and will flush the status block. 
*/ - if (unlikely(sblk->status_tag == tp->last_tag)) { + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { handled = 0; @@ -4046,18 +5149,22 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * excessive spurious interrupts can be worse in some cases. */ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. + */ + tnapi->last_irq_tag = sblk->status_tag; + if (tg3_irq_sync(tp)) goto out; - if (netif_rx_schedule_prep(dev, &tp->napi)) { - prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); - /* Update last_tag to mark that this status has been - * seen. Because interrupt may be shared, we may be - * racing with tg3_poll(), so only update last_tag - * if tg3_poll() is not scheduled. - */ - tp->last_tag = sblk->status_tag; - __netif_rx_schedule(dev, &tp->napi); - } + + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + napi_schedule(&tnapi->napi); + out: return IRQ_RETVAL(handled); } @@ -4065,9 +5172,9 @@ out: /* ISR for interrupt test */ static irqreturn_t tg3_test_isr(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct tg3 *tp = netdev_priv(dev); - struct tg3_hw_status *sblk = tp->hw_status; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; if ((sblk->status & SD_STATUS_UPDATED) || !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { @@ -4097,7 +5204,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) tg3_full_unlock(tp); del_timer_sync(&tp->timer); tp->irq_sync = 0; - napi_enable(&tp->napi); + tg3_napi_enable(tp); dev_close(tp->dev); tg3_full_lock(tp, 0); } @@ -4107,15 +5214,18 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) #ifdef CONFIG_NET_POLL_CONTROLLER static void tg3_poll_controller(struct net_device *dev) { + int i; struct tg3 *tp = netdev_priv(dev); - tg3_interrupt(tp->pdev->irq, dev); + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, dev); } #endif static void tg3_reset_task(struct work_struct *work) { struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; unsigned int restart_timer; tg3_full_lock(tp, 0); @@ -4127,6 +5237,8 @@ static void tg3_reset_task(struct work_struct *work) tg3_full_unlock(tp); + tg3_phy_stop(tp); + tg3_netif_stop(tp); tg3_full_lock(tp, 1); @@ -4142,7 +5254,8 @@ static void tg3_reset_task(struct work_struct *work) } tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); - if (tg3_init_hw(tp, 1)) + err = tg3_init_hw(tp, 1); + if (err) goto out; tg3_netif_start(tp); @@ -4152,6 +5265,9 @@ static void tg3_reset_task(struct work_struct *work) out: tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); } static void tg3_dump_short_state(struct tg3 *tp) @@ -4190,20 +5306,21 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, { #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) - return (((u64) mapping + len) > DMA_40BIT_MASK); + return (((u64) mapping + len) > DMA_BIT_MASK(40)); return 0; #else return 0; #endif } -static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); +static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); /* Workaround 4GB and 40-bit 
hardware DMA bugs. */ -static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, - u32 last_plus_one, u32 *start, - u32 base_flags, u32 mss) +static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, + struct sk_buff *skb, u32 last_plus_one, + u32 *start, u32 base_flags, u32 mss) { + struct tg3 *tp = tnapi->tp; struct sk_buff *new_skb; dma_addr_t new_addr = 0; u32 entry = *start; @@ -4226,15 +5343,24 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, entry = *start; new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { + ret = -1; + dev_kfree_skb(new_skb); + new_skb = NULL; + /* Make sure new skb does not cross any 4G boundaries. * Drop the packet if it does. */ - if (tg3_4g_overflow_test(new_addr, new_skb->len)) { + } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && + tg3_4g_overflow_test(new_addr, new_skb->len)) { + pci_unmap_single(tp->pdev, new_addr, new_skb->len, + PCI_DMA_TODEVICE); ret = -1; dev_kfree_skb(new_skb); new_skb = NULL; } else { - tg3_set_txd(tp, entry, new_addr, new_skb->len, + tg3_set_txd(tnapi, entry, new_addr, new_skb->len, base_flags, 1 | (mss << 1)); *start = NEXT_TX(entry); } @@ -4249,14 +5375,17 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, len = skb_headlen(skb); else len = skb_shinfo(skb)->frags[i-1].size; + pci_unmap_single(tp->pdev, - pci_unmap_addr(&tp->tx_buffers[entry], mapping), + pci_unmap_addr(&tnapi->tx_buffers[entry], + mapping), len, PCI_DMA_TODEVICE); if (i == 0) { - tp->tx_buffers[entry].skb = new_skb; - pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr); + tnapi->tx_buffers[entry].skb = new_skb; + pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + new_addr); } else { - tp->tx_buffers[entry].skb = NULL; + tnapi->tx_buffers[entry].skb = NULL; } entry = NEXT_TX(entry); i++; @@ -4267,11 +5396,11 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, return ret; } -static void tg3_set_txd(struct tg3 *tp, int entry, +static void tg3_set_txd(struct tg3_napi *tnapi, int entry, dma_addr_t mapping, int len, u32 flags, u32 mss_and_is_end) { - struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry]; + struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; int is_end = (mss_and_is_end & 0x1); u32 mss = (mss_and_is_end >> 1); u32 vlan_tag = 0; @@ -4291,24 +5420,32 @@ static void tg3_set_txd(struct tg3 *tp, int entry, } /* hard_start_xmit for devices that don't have any bugs and - * support TG3_FLG2_HW_TSO_2 only. + * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. */ -static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, + struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - dma_addr_t mapping; u32 len, entry, base_flags, mss; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int i, last; - len = skb_headlen(skb); + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + tnapi++; /* We are running in BH disabled context with netif_tx_lock * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
*/ - if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); + if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); /* This is a hard error, log it. */ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " @@ -4317,11 +5454,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - entry = tp->tx_prod; + entry = tnapi->tx_prod; base_flags = 0; mss = 0; if ((mss = skb_shinfo(skb)->gso_size) != 0) { int tcp_opt_len, ip_tcp_len; + u32 hdrlen; if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { @@ -4330,7 +5468,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - mss |= (skb_headlen(skb) - ETH_HLEN) << 9; + hdrlen = skb_headlen(skb) - ETH_HLEN; else { struct iphdr *iph = ip_hdr(skb); @@ -4339,9 +5477,17 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) iph->check = 0; iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); - mss |= (ip_tcp_len + tcp_opt_len) << 9; + hdrlen = ip_tcp_len + tcp_opt_len; } + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { + mss |= (hdrlen & 0xc) << 12; + if (hdrlen & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdrlen & 0x3e0) << 5; + } else + mss |= hdrlen << 9; + base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); @@ -4356,21 +5502,29 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) (vlan_tx_tag_get(skb) << 16)); #endif + len = skb_headlen(skb); + /* Queue skb data, a.k.a. the main skb fragment. */ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + goto out_unlock; + } - tp->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); + tnapi->tx_buffers[entry].skb = skb; + pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); - tg3_set_txd(tp, entry, mapping, len, base_flags, + if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && + !mss && skb->len > ETH_DATA_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); entry = NEXT_TX(entry); /* Now loop through additional data fragments, and queue them. */ if (skb_shinfo(skb)->nr_frags > 0) { - unsigned int i, last; - last = skb_shinfo(skb)->nr_frags - 1; for (i = 0; i <= last; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -4380,11 +5534,14 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) frag->page, frag->page_offset, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) + goto dma_error; - tp->tx_buffers[entry].skb = NULL; - pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); + tnapi->tx_buffers[entry].skb = NULL; + pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); - tg3_set_txd(tp, entry, mapping, len, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last) | (mss << 1)); entry = NEXT_TX(entry); @@ -4392,24 +5549,44 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Packets are ready, update Tx producer idx local and on card. 
*/ - tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); + tw32_tx_mbox(tnapi->prodmbox, entry); - tp->tx_prod = entry; - if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { - netif_stop_queue(dev); - if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) - netif_wake_queue(tp->dev); + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); } out_unlock: - mmiowb(); + mmiowb(); + + return NETDEV_TX_OK; + +dma_error: + last = i; + entry = tnapi->tx_prod; + tnapi->tx_buffers[entry].skb = NULL; + pci_unmap_single(tp->pdev, + pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + entry = NEXT_TX(entry); - dev->trans_start = jiffies; + pci_unmap_page(tp->pdev, + pci_unmap_addr(&tnapi->tx_buffers[entry], + mapping), + frag->size, PCI_DMA_TODEVICE); + } + dev_kfree_skb(skb); return NETDEV_TX_OK; } -static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); +static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, + struct net_device *); /* Use GSO to workaround a rare TSO bug that may be triggered when the * TSO header is greater than 80 bytes. @@ -4417,11 +5594,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) { struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; /* Estimate the number of fragments in the worst case */ - if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { netif_stop_queue(tp->dev); - if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3)) + if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) return NETDEV_TX_BUSY; netif_wake_queue(tp->dev); @@ -4447,23 +5625,31 @@ tg3_tso_bug_end: /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and * support TG3_FLG2_HW_TSO_1 or firmware TSO only. */ -static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, + struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - dma_addr_t mapping; u32 len, entry, base_flags, mss; int would_hit_hwbug; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int i, last; - len = skb_headlen(skb); + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + tnapi++; /* We are running in BH disabled context with netif_tx_lock * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); + if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); /* This is a hard error, log it. */ printk(KERN_ERR PFX "%s: BUG! 
Tx Ring full when " @@ -4472,14 +5658,14 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - entry = tp->tx_prod; + entry = tnapi->tx_prod; base_flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; - mss = 0; + if ((mss = skb_shinfo(skb)->gso_size) != 0) { struct iphdr *iph; - int tcp_opt_len, ip_tcp_len, hdr_len; + u32 tcp_opt_len, ip_tcp_len, hdr_len; if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { @@ -4510,8 +5696,15 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) IPPROTO_TCP, 0); - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) + mss |= hdr_len << 9; + else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { if (tcp_opt_len || iph->ihl > 5) { int tsflags; @@ -4533,28 +5726,44 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) (vlan_tx_tag_get(skb) << 16)); #endif - /* Queue skb data, a.k.a. the main skb fragment. */ + if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && + !mss && skb->len > ETH_DATA_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + len = skb_headlen(skb); + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + goto out_unlock; + } - tp->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); + tnapi->tx_buffers[entry].skb = skb; + pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); would_hit_hwbug = 0; - if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) + if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) + would_hit_hwbug = 1; + + if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && + tg3_4g_overflow_test(mapping, len)) would_hit_hwbug = 1; - else if (tg3_4g_overflow_test(mapping, len)) + + if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && + tg3_40bit_overflow_test(tp, mapping, len)) + would_hit_hwbug = 1; + + if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) would_hit_hwbug = 1; - tg3_set_txd(tp, entry, mapping, len, base_flags, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); entry = NEXT_TX(entry); /* Now loop through additional data fragments, and queue them. 
*/ if (skb_shinfo(skb)->nr_frags > 0) { - unsigned int i, last; - last = skb_shinfo(skb)->nr_frags - 1; for (i = 0; i <= last; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -4565,20 +5774,29 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) frag->page_offset, len, PCI_DMA_TODEVICE); - tp->tx_buffers[entry].skb = NULL; - pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); + tnapi->tx_buffers[entry].skb = NULL; + pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (pci_dma_mapping_error(tp->pdev, mapping)) + goto dma_error; + + if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && + len <= 8) + would_hit_hwbug = 1; - if (tg3_4g_overflow_test(mapping, len)) + if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && + tg3_4g_overflow_test(mapping, len)) would_hit_hwbug = 1; - if (tg3_40bit_overflow_test(tp, mapping, len)) + if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && + tg3_40bit_overflow_test(tp, mapping, len)) would_hit_hwbug = 1; if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) - tg3_set_txd(tp, entry, mapping, len, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last)|(mss << 1)); else - tg3_set_txd(tp, entry, mapping, len, + tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last)); entry = NEXT_TX(entry); @@ -4595,7 +5813,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) /* If the workaround fails due to memory/mapping * failure, silently drop this packet. */ - if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, + if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, &start, base_flags, mss)) goto out_unlock; @@ -4603,20 +5821,39 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) } /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); + tw32_tx_mbox(tnapi->prodmbox, entry); - tp->tx_prod = entry; - if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { - netif_stop_queue(dev); - if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) - netif_wake_queue(tp->dev); + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); } out_unlock: - mmiowb(); + mmiowb(); - dev->trans_start = jiffies; + return NETDEV_TX_OK; + +dma_error: + last = i; + entry = tnapi->tx_prod; + tnapi->tx_buffers[entry].skb = NULL; + pci_unmap_single(tp->pdev, + pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + entry = NEXT_TX(entry); + + pci_unmap_page(tp->pdev, + pci_unmap_addr(&tnapi->tx_buffers[entry], + mapping), + frag->size, PCI_DMA_TODEVICE); + } + dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -4655,6 +5892,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) return 0; } + tg3_phy_stop(tp); + tg3_netif_stop(tp); tg3_full_lock(tp, 1); @@ -4670,78 +5909,43 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_full_unlock(tp); + if (!err) + tg3_phy_start(tp); + return err; } -/* Free up pending packets in all rx/tx rings. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. tp->{tx,}lock is not held and we are not - * in an interrupt context and thus may sleep. 
- */ -static void tg3_free_rings(struct tg3 *tp) +static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) { - struct ring_info *rxp; int i; - for (i = 0; i < TG3_RX_RING_SIZE; i++) { - rxp = &tp->rx_std_buffers[i]; - - if (rxp->skb == NULL) - continue; - pci_unmap_single(tp->pdev, - pci_unmap_addr(rxp, mapping), - tp->rx_pkt_buf_sz - tp->rx_offset, - PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(rxp->skb); - rxp->skb = NULL; - } - - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { - rxp = &tp->rx_jumbo_buffers[i]; - - if (rxp->skb == NULL) - continue; - pci_unmap_single(tp->pdev, - pci_unmap_addr(rxp, mapping), - RX_JUMBO_PKT_BUF_SZ - tp->rx_offset, - PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(rxp->skb); - rxp->skb = NULL; - } - - for (i = 0; i < TG3_TX_RING_SIZE; ) { - struct tx_ring_info *txp; - struct sk_buff *skb; - int j; - - txp = &tp->tx_buffers[i]; - skb = txp->skb; - - if (skb == NULL) { - i++; - continue; + if (tpr != &tp->prodring[0]) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) % TG3_RX_RING_SIZE) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } } - pci_unmap_single(tp->pdev, - pci_unmap_addr(txp, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - txp->skb = NULL; - - i++; + return; + } - for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { - txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; - pci_unmap_page(tp->pdev, - pci_unmap_addr(txp, mapping), - skb_shinfo(skb)->frags[j].size, - PCI_DMA_TODEVICE); - i++; - } + for (i = 0; i < TG3_RX_RING_SIZE; i++) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); - dev_kfree_skb_any(skb); + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); } } @@ -4752,23 +5956,32 @@ static void tg3_free_rings(struct tg3 *tp) * end up in the driver. tp->{tx,}lock are held and thus * we may not sleep. */ -static int tg3_init_rings(struct tg3 *tp) +static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) { - u32 i; + u32 i, rx_pkt_dma_sz; - /* Free up all the SKBs. */ - tg3_free_rings(tp); + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->prodring[0]) { + memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE); + goto done; + } /* Zero out all descriptors. */ - memset(tp->rx_std, 0, TG3_RX_RING_BYTES); - memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES); - memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); + memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); - tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && - (tp->dev->mtu > ETH_DATA_LEN)) - tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); /* Initialize invariants of the rings, we only set this * stuff once. 
This works because the card does not @@ -4777,102 +5990,258 @@ static int tg3_init_rings(struct tg3 *tp) for (i = 0; i < TG3_RX_RING_SIZE; i++) { struct tg3_rx_buffer_desc *rxd; - rxd = &tp->rx_std[i]; - rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64) - << RXD_LEN_SHIFT; + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); rxd->opaque = (RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT)); } - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { - struct tg3_rx_buffer_desc *rxd; - - rxd = &tp->rx_jumbo[i]; - rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64) - << RXD_LEN_SHIFT; - rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | - RXD_FLAG_JUMBO; - rxd->opaque = (RXD_OPAQUE_RING_JUMBO | - (i << RXD_OPAQUE_INDEX_SHIFT)); - } - } - /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { printk(KERN_WARNING PFX "%s: Using a smaller RX standard ring, " "only %d out of %d buffers were allocated " "successfully.\n", tp->dev->name, i, tp->rx_pending); if (i == 0) - return -ENOMEM; + goto initfail; tp->rx_pending = i; break; } } + if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); + if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { + for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, - -1, i) < 0) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, + i) < 0) { printk(KERN_WARNING PFX "%s: Using a smaller RX jumbo ring, " "only %d out of %d buffers were " "allocated successfully.\n", tp->dev->name, i, tp->rx_jumbo_pending); - if (i == 0) { - tg3_free_rings(tp); - return -ENOMEM; - } + if (i == 0) + goto initfail; tp->rx_jumbo_pending = i; break; } } } + +done: return 0; + +initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; } -/* - * Must not be invoked with interrupt sources disabled and - * the hardware shutdown down. 
- */ -static void tg3_free_consistent(struct tg3 *tp) +static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) { - kfree(tp->rx_std_buffers); - tp->rx_std_buffers = NULL; - if (tp->rx_std) { + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, - tp->rx_std, tp->rx_std_mapping); - tp->rx_std = NULL; + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; } - if (tp->rx_jumbo) { + if (tpr->rx_jmb) { pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, - tp->rx_jumbo, tp->rx_jumbo_mapping); - tp->rx_jumbo = NULL; + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } +} + +static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, + &tpr->rx_std_mapping); + if (!tpr->rx_std) + goto err_out; + + if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = pci_alloc_consistent(tp->pdev, + TG3_RX_JUMBO_RING_BYTES, + &tpr->rx_jmb_mapping); + if (!tpr->rx_jmb) + goto err_out; } - if (tp->rx_rcb) { - pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), - tp->rx_rcb, tp->rx_rcb_mapping); - tp->rx_rcb = NULL; + + return 0; + +err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void tg3_free_rings(struct tg3 *tp) +{ + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + if (!tnapi->tx_buffers) + continue; + + for (i = 0; i < TG3_TX_RING_SIZE; ) { + struct ring_info *txp; + struct sk_buff *skb; + unsigned int k; + + txp = &tnapi->tx_buffers[i]; + skb = txp->skb; + + if (skb == NULL) { + i++; + continue; + } + + pci_unmap_single(tp->pdev, + pci_unmap_addr(txp, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + txp->skb = NULL; + + i++; + + for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { + txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; + pci_unmap_page(tp->pdev, + pci_unmap_addr(txp, mapping), + skb_shinfo(skb)->frags[k].size, + PCI_DMA_TODEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1) + tg3_rx_prodring_free(tp, &tp->prodring[j]); } - if (tp->tx_ring) { - pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, - tp->tx_ring, tp->tx_desc_mapping); - tp->tx_ring = NULL; +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_init_rings(struct tg3 *tp) +{ + int i; + + /* Free up all the SKBs. 
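[Illustrative sketch, not part of the patch.] The tg3_free_rings() walk above depends on the transmit-ring layout used when packets are queued: each skb owns one descriptor for its linear head plus one per page fragment, and ring indexes wrap with a power-of-two mask rather than a modulo. A small sketch of that bookkeeping, with placeholder names rather than the driver's:

#include <stddef.h>

#define TX_RING_SIZE 512u			/* a power of two */
#define NEXT_IDX(n)  (((n) + 1) & (TX_RING_SIZE - 1))

/* Descriptors consumed by one packet: the linear head plus one per fragment. */
static size_t descs_for_packet(size_t nr_frags)
{
	return 1 + nr_frags;
}

/* Advance past all ring entries a packet occupies, starting at 'entry'. */
static size_t skip_packet(size_t entry, size_t nr_frags)
{
	size_t i, n = descs_for_packet(nr_frags);

	for (i = 0; i < n; i++)
		entry = NEXT_IDX(entry);
	return entry;
}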
*/ + tg3_free_rings(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tnapi->tx_prod = 0; + tnapi->tx_cons = 0; + if (tnapi->tx_ring) + memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); + + tnapi->rx_rcb_ptr = 0; + if (tnapi->rx_rcb) + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + + if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) && + tg3_rx_prodring_alloc(tp, &tp->prodring[i])) + return -ENOMEM; } - if (tp->hw_status) { - pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, - tp->hw_status, tp->status_mapping); - tp->hw_status = NULL; + + return 0; +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ +static void tg3_free_consistent(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->tx_ring) { + pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + } + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + + if (tnapi->rx_rcb) { + pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } + + if (tnapi->hw_status) { + pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); + tnapi->hw_status = NULL; + } } + if (tp->hw_stats) { pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } + + for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) + tg3_rx_prodring_fini(tp, &tp->prodring[i]); } /* @@ -4881,54 +6250,93 @@ static void tg3_free_consistent(struct tg3 *tp) */ static int tg3_alloc_consistent(struct tg3 *tp) { - tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) * - (TG3_RX_RING_SIZE + - TG3_RX_JUMBO_RING_SIZE)) + - (sizeof(struct tx_ring_info) * - TG3_TX_RING_SIZE), - GFP_KERNEL); - if (!tp->rx_std_buffers) - return -ENOMEM; + int i; - tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE]; - tp->tx_buffers = (struct tx_ring_info *) - &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]; + for (i = 0; i < (tp->irq_cnt == 1 ? 
1 : tp->irq_cnt - 1); i++) { + if (tg3_rx_prodring_init(tp, &tp->prodring[i])) + goto err_out; + } - tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, - &tp->rx_std_mapping); - if (!tp->rx_std) + tp->hw_stats = pci_alloc_consistent(tp->pdev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping); + if (!tp->hw_stats) goto err_out; - tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, - &tp->rx_jumbo_mapping); + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - if (!tp->rx_jumbo) - goto err_out; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + struct tg3_hw_status *sblk; - tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), - &tp->rx_rcb_mapping); - if (!tp->rx_rcb) - goto err_out; + tnapi->hw_status = pci_alloc_consistent(tp->pdev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping); + if (!tnapi->hw_status) + goto err_out; - tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, - &tp->tx_desc_mapping); - if (!tp->tx_ring) - goto err_out; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + sblk = tnapi->hw_status; - tp->hw_status = pci_alloc_consistent(tp->pdev, - TG3_HW_STATUS_SIZE, - &tp->status_mapping); - if (!tp->hw_status) - goto err_out; + /* If multivector TSS is enabled, vector 0 does not handle + * tx interrupts. Don't allocate any resources for it. + */ + if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || + (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { + tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * + TG3_TX_RING_SIZE, + GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; - tp->hw_stats = pci_alloc_consistent(tp->pdev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping); - if (!tp->hw_stats) - goto err_out; + tnapi->tx_ring = pci_alloc_consistent(tp->pdev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping); + if (!tnapi->tx_ring) + goto err_out; + } + + /* + * When RSS is enabled, the status block format changes + * slightly. The "rx_jumbo_consumer", "reserved", + * and "rx_mini_consumer" members get mapped to the + * other three rx return ring producer indexes. + */ + switch (i) { + default: + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; + break; + case 2: + tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; + break; + case 3: + tnapi->rx_rcb_prod_idx = &sblk->reserved; + break; + case 4: + tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; + break; + } + + if (tp->irq_cnt == 1) + tnapi->prodring = &tp->prodring[0]; + else if (i) + tnapi->prodring = &tp->prodring[i - 1]; + + /* + * If multivector RSS is enabled, vector 0 does not handle + * rx or tx interrupts. Don't allocate any resources for it. 
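[Illustrative sketch, not part of the patch.] The switch above is what the status-block comment describes: under RSS the extra rx return rings have no dedicated producer-index fields, so otherwise-idle status block members are pressed into service for them. The layout below is only an illustration of that reuse (the real definition is struct tg3_hw_status in tg3.h; no attempt is made to match its exact order or padding):

#include <stdint.h>

struct rss_status_demo {
	uint16_t rx_producer;		/* return ring 1 producer (the default case) */
	uint16_t rx_jumbo_consumer;	/* reused as return ring 2's producer (vector 2) */
	uint16_t reserved;		/* reused as return ring 3's producer (vector 3) */
	uint16_t rx_mini_consumer;	/* reused as return ring 4's producer (vector 4) */
};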
+ */ + if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) + continue; - memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping); + if (!tnapi->rx_rcb) + goto err_out; + + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + } return 0; @@ -4961,7 +6369,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int default: break; - }; + } } val = tr32(ofs); @@ -5040,70 +6448,17 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); - if (tp->hw_status) - memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } if (tp->hw_stats) memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); return err; } -/* tp->lock is held. */ -static int tg3_nvram_lock(struct tg3 *tp) -{ - if (tp->tg3_flags & TG3_FLAG_NVRAM) { - int i; - - if (tp->nvram_lock_cnt == 0) { - tw32(NVRAM_SWARB, SWARB_REQ_SET1); - for (i = 0; i < 8000; i++) { - if (tr32(NVRAM_SWARB) & SWARB_GNT1) - break; - udelay(20); - } - if (i == 8000) { - tw32(NVRAM_SWARB, SWARB_REQ_CLR1); - return -ENODEV; - } - } - tp->nvram_lock_cnt++; - } - return 0; -} - -/* tp->lock is held. */ -static void tg3_nvram_unlock(struct tg3 *tp) -{ - if (tp->tg3_flags & TG3_FLAG_NVRAM) { - if (tp->nvram_lock_cnt > 0) - tp->nvram_lock_cnt--; - if (tp->nvram_lock_cnt == 0) - tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); - } -} - -/* tp->lock is held. */ -static void tg3_enable_nvram_access(struct tg3 *tp) -{ - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { - u32 nvaccess = tr32(NVRAM_ACCESS); - - tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); - } -} - -/* tp->lock is held. */ -static void tg3_disable_nvram_access(struct tg3 *tp) -{ - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { - u32 nvaccess = tr32(NVRAM_ACCESS); - - tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); - } -} - static void tg3_ape_send_event(struct tg3 *tp, u32 event) { int i; @@ -5114,7 +6469,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event) return; apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); - if (apedata != APE_FW_STATUS_READY) + if (!(apedata & APE_FW_STATUS_READY)) return; /* Wait for up to 1 millisecond for APE to service previous event. */ @@ -5164,6 +6519,13 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) event = APE_EVENT_STATUS_STATE_START; break; case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. 
+ */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + event = APE_EVENT_STATUS_STATE_UNLOAD; break; case RESET_KIND_SUSPEND: @@ -5203,7 +6565,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) default: break; - }; + } } if (kind == RESET_KIND_INIT || @@ -5228,7 +6590,7 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) default: break; - }; + } } if (kind == RESET_KIND_SHUTDOWN) @@ -5257,7 +6619,7 @@ static void tg3_write_sig_legacy(struct tg3 *tp, int kind) default: break; - }; + } } } @@ -5328,17 +6690,19 @@ static void tg3_restore_pci_state(struct tg3 *tp) pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) - pcie_set_readrq(tp->pdev, 4096); - else { - pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, - tp->pci_cacheline_sz); - pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, - tp->pci_lat_timer); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { + if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) + pcie_set_readrq(tp->pdev, 4096); + else { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } } /* Make sure PCI-X relaxed ordering bit is clear. */ - if (tp->pcix_cap) { + if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { u16 pcix_cmd; pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, @@ -5375,10 +6739,12 @@ static int tg3_chip_reset(struct tg3 *tp) { u32 val; void (*write_op)(struct tg3 *, u32, u32); - int err; + int i, err; tg3_nvram_lock(tp); + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); + /* No matching tg3_nvram_unlock() after this because * chip reset below will undo the nvram lock. */ @@ -5391,10 +6757,7 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_save_pci_state(tp); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) tw32(GRC_FASTBOOT_PC, 0); /* @@ -5414,13 +6777,24 @@ static int tg3_chip_reset(struct tg3 *tp) * sharing or irqpoll. */ tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; - if (tp->hw_status) { - tp->hw_status->status = 0; - tp->hw_status->status_tag = 0; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; } - tp->last_tag = 0; smp_mb(); - synchronize_irq(tp->pdev->irq); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } /* do the reset */ val = GRC_MISC_CFG_CORECLK_RESET; @@ -5473,7 +6847,9 @@ static int tg3_chip_reset(struct tg3 *tp) udelay(120); - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { + u16 val16; + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { int i; u32 cfg_val; @@ -5486,8 +6862,33 @@ static int tg3_chip_reset(struct tg3 *tp) pci_write_config_dword(tp->pdev, 0xc4, cfg_val | (1 << 15)); } - /* Set PCIE max payload size and clear error status. */ - pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); + + /* Clear the "no snoop" and "relaxed ordering" bits. 
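[Illustrative sketch, not part of the patch.] The hunk that follows replaces the raw config-space write at offset 0xd8 with accesses through the PCI Express capability. For the 128-byte restriction it mentions: the max-payload-size field of the Device Control register encodes the MPS as 128 << v bytes, so clearing the field (as the patch does for pre-CPMU parts and the 5784) pins it at the 128-byte minimum. The mask below mirrors PCI_EXP_DEVCTL_PAYLOAD; everything else is illustrative:

#include <stdint.h>

#define DEVCTL_PAYLOAD_MASK 0x00e0u	/* bits 7:5 of PCIe Device Control */

/* Max payload size in bytes encoded in a Device Control register value. */
static unsigned int mps_bytes(uint16_t devctl)
{
	return 128u << ((devctl & DEVCTL_PAYLOAD_MASK) >> 5);
}

/* Clearing the field selects the smallest (128-byte) setting. */
static uint16_t force_128_byte_mps(uint16_t devctl)
{
	return devctl & (uint16_t)~DEVCTL_PAYLOAD_MASK;
}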
*/ + pci_read_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_DEVCTL, + &val16); + val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN); + /* + * Older PCIe devices only support the 128 byte + * MPS setting. Enforce the restriction. + */ + if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)) + val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + pci_write_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_DEVCTL, + val16); + + pcie_set_readrq(tp->pdev, 4096); + + /* Clear error status */ + pci_write_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_URD); } tg3_restore_pci_state(tp); @@ -5526,16 +6927,52 @@ static int tg3_chip_reset(struct tg3 *tp) } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_GMII; tw32_f(MAC_MODE, tp->mac_mode); + } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); + if (tp->mac_mode & MAC_MODE_APE_TX_EN) + tp->mac_mode |= MAC_MODE_TDE_ENABLE; + tw32_f(MAC_MODE, tp->mac_mode); } else tw32_f(MAC_MODE, 0); udelay(40); + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + err = tg3_poll_fw(tp); if (err) return err; + tg3_mdio_start(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + u8 phy_addr; + + phy_addr = tp->phy_addr; + tp->phy_addr = TG3_PHY_PCIE_ADDR; + + tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, + TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT); + val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL | + TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL | + TG3_PCIEPHY_TX0CTRL1_NB_EN; + tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val); + udelay(10); + + tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, + TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT); + val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN | + TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN; + tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val); + udelay(10); + + tp->phy_addr = phy_addr; + } + if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && - tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { + tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); @@ -5551,6 +6988,7 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; + tp->last_event_jiffies = jiffies; if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; } @@ -5564,15 +7002,12 @@ static void tg3_stop_fw(struct tg3 *tp) { if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { - u32 val; - /* Wait for RX cpu to ACK the previous event. */ tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); - val = tr32(GRC_RX_CPU_EVENT); - val |= GRC_RX_CPU_DRIVER_EVENT; - tw32(GRC_RX_CPU_EVENT, val); + + tg3_generate_fw_event(tp); /* Wait for RX cpu to ACK this event. 
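[Illustrative sketch, not part of the patch.] tg3_stop_fw() above follows the usual driver-to-firmware handshake: post the command word in shared memory, raise the driver-event bit via tg3_generate_fw_event(), then wait for the firmware to acknowledge by clearing it. A rough standalone sketch of that pattern with placeholder accessors (the real code uses the NIC's firmware command mailbox and the RX CPU event register):

#include <stdbool.h>
#include <stdint.h>

struct fw_mbox_ops {
	void (*write_cmd)(uint32_t cmd);	/* post the command word */
	void (*raise_event)(void);		/* set the driver-event bit */
	bool (*event_pending)(void);		/* is the bit still set? */
	void (*delay_us)(unsigned int us);
};

/* Post one command and wait (bounded) for the firmware to acknowledge it. */
static bool fw_send_command(const struct fw_mbox_ops *ops, uint32_t cmd,
			    unsigned int max_wait_us)
{
	unsigned int waited = 0;

	ops->write_cmd(cmd);
	ops->raise_event();

	while (ops->event_pending()) {
		if (waited >= max_wait_us)
			return false;
		ops->delay_us(10);
		waited += 10;
	}
	return true;
}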
*/ tg3_wait_for_event_ack(tp); @@ -5591,6 +7026,8 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) tg3_abort_hw(tp, silent); err = tg3_chip_reset(tp); + __tg3_set_mac_addr(tp, 0); + tg3_write_sig_legacy(tp, kind); tg3_write_sig_post_reset(tp, kind); @@ -5600,130 +7037,6 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) return 0; } -#define TG3_FW_RELEASE_MAJOR 0x0 -#define TG3_FW_RELASE_MINOR 0x0 -#define TG3_FW_RELEASE_FIX 0x0 -#define TG3_FW_START_ADDR 0x08000000 -#define TG3_FW_TEXT_ADDR 0x08000000 -#define TG3_FW_TEXT_LEN 0x9c0 -#define TG3_FW_RODATA_ADDR 0x080009c0 -#define TG3_FW_RODATA_LEN 0x60 -#define TG3_FW_DATA_ADDR 0x08000a40 -#define TG3_FW_DATA_LEN 0x20 -#define TG3_FW_SBSS_ADDR 0x08000a60 -#define TG3_FW_SBSS_LEN 0xc -#define TG3_FW_BSS_ADDR 0x08000a70 -#define TG3_FW_BSS_LEN 0x10 - -static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { - 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, - 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000, - 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034, - 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000, - 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105, - 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0, - 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010, - 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01, - 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c, - 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000, - 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400, - 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c, - 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000, - 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64, - 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000, - 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, - 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68, - 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003, - 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800, - 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, - 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60, - 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008, - 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b, - 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, - 0x0a0001e3, 0x00000000, 0x0a0001e3, 
0x3c0a0013, 0x0a0001e3, 0x3c0a0014, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010, - 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74, - 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c, - 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800, - 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001, - 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028, - 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800, - 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0, - 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, - 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001, - 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810, - 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018, - 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec, - 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c, - 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74, - 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000, - 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c, - 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c, - 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df, - 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000, - 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800, - 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402, - 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00, - 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010, - 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df, - 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001, - 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008, - 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021, - 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018, - 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b, - 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000, - 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008, - 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b, - 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001, - 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821, - 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000, - 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000, - 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821, - 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff, - 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008, - 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010, - 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000, - 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428, - 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c, - 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 
0x1043001e, - 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010, - 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000, - 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001, - 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000, - 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824, - 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000 -}; - -static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { - 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430, - 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, - 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000, - 0x00000000 -}; - -#if 0 /* All zeros, don't eat up space with it. */ -u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000 -}; -#endif - #define RX_CPU_SCRATCH_BASE 0x30000 #define RX_CPU_SCRATCH_SIZE 0x04000 #define TX_CPU_SCRATCH_BASE 0x34000 @@ -5778,15 +7091,9 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) } struct fw_info { - unsigned int text_base; - unsigned int text_len; - const u32 *text_data; - unsigned int rodata_base; - unsigned int rodata_len; - const u32 *rodata_data; - unsigned int data_base; - unsigned int data_len; - const u32 *data_data; + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; }; /* tp->lock is held. */ @@ -5823,24 +7130,11 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b write_op(tp, cpu_scratch_base + i, 0); tw32(cpu_base + CPU_STATE, 0xffffffff); tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->text_len / sizeof(u32)); i++) + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) write_op(tp, (cpu_scratch_base + - (info->text_base & 0xffff) + + (info->fw_base & 0xffff) + (i * sizeof(u32))), - (info->text_data ? - info->text_data[i] : 0)); - for (i = 0; i < (info->rodata_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->rodata_base & 0xffff) + - (i * sizeof(u32))), - (info->rodata_data ? - info->rodata_data[i] : 0)); - for (i = 0; i < (info->data_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->data_base & 0xffff) + - (i * sizeof(u32))), - (info->data_data ? - info->data_data[i] : 0)); + be32_to_cpu(info->fw_data[i])); err = 0; @@ -5852,17 +7146,20 @@ out: static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) { struct fw_info info; + const __be32 *fw_data; int err, i; - info.text_base = TG3_FW_TEXT_ADDR; - info.text_len = TG3_FW_TEXT_LEN; - info.text_data = &tg3FwText[0]; - info.rodata_base = TG3_FW_RODATA_ADDR; - info.rodata_len = TG3_FW_RODATA_LEN; - info.rodata_data = &tg3FwRodata[0]; - info.data_base = TG3_FW_DATA_ADDR; - info.data_len = TG3_FW_DATA_LEN; - info.data_data = NULL; + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. 
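[Illustrative sketch, not part of the patch.] Per the comment above, the external firmware blob loaded into tp->fw starts with three big-endian words (version, load address, length) followed by the image proper, which is what the assignments below pick apart. A minimal userspace-style parsing sketch under that assumption (it presumes the 12-byte header described here and a 4-byte-aligned blob; names are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>

struct fw_view {
	uint32_t version;		/* word 0: firmware version */
	uint32_t base;			/* word 1: load (start) address */
	uint32_t len;			/* word 2: end_of_bss - start_of_text */
	const uint32_t *data;		/* word 3 onward: big-endian image words */
	size_t data_words;
};

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Returns 0 on success, -1 if the blob is too small to hold the header. */
static int parse_fw_blob(const uint8_t *blob, size_t size, struct fw_view *out)
{
	if (size < 12)
		return -1;
	out->version = be32(blob);
	out->base = be32(blob + 4);
	out->len = be32(blob + 8);
	out->data = (const uint32_t *)(blob + 12);	/* words are still big-endian */
	out->data_words = (size - 12) / 4;
	return 0;
}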
*/ + + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, @@ -5878,21 +7175,21 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) /* Now startup only the RX cpu. */ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR) + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) break; tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); udelay(1000); } if (i >= 5) { printk(KERN_ERR PFX "tg3_load_firmware fails for %s " "to set RX CPU PC, is %08x should be %08x\n", tp->dev->name, tr32(RX_CPU_BASE + CPU_PC), - TG3_FW_TEXT_ADDR); + info.fw_base); return -ENODEV; } tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); @@ -5901,547 +7198,36 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) return 0; } - -#define TG3_TSO_FW_RELEASE_MAJOR 0x1 -#define TG3_TSO_FW_RELASE_MINOR 0x6 -#define TG3_TSO_FW_RELEASE_FIX 0x0 -#define TG3_TSO_FW_START_ADDR 0x08000000 -#define TG3_TSO_FW_TEXT_ADDR 0x08000000 -#define TG3_TSO_FW_TEXT_LEN 0x1aa0 -#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0 -#define TG3_TSO_FW_RODATA_LEN 0x60 -#define TG3_TSO_FW_DATA_ADDR 0x08001b20 -#define TG3_TSO_FW_DATA_LEN 0x30 -#define TG3_TSO_FW_SBSS_ADDR 0x08001b50 -#define TG3_TSO_FW_SBSS_LEN 0x2c -#define TG3_TSO_FW_BSS_ADDR 0x08001b80 -#define TG3_TSO_FW_BSS_LEN 0x894 - -static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { - 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000, - 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, - 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, - 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800, - 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001, - 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c, - 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001, - 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008, - 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, - 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001, - 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000, - 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001, - 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800, - 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c, - 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, - 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021, - 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800, - 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c, - 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, - 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800, - 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8, - 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8, - 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90, - 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068, - 0x00021702, 0x14400004, 
0x24020001, 0x3c010800, 0x0a000097, 0xac20240c, - 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021, - 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008, - 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021, - 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b, - 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, - 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, - 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020, - 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800, - 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98, - 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902, - 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602, - 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001, - 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c, - 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, - 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4, - 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410, - 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800, - 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4, - 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800, - 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800, - 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800, - 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800, - 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, - 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800, - 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, - 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800, - 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14, - 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800, - 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, - 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002, - 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80, - 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001, - 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003, - 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000, - 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656, - 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078, - 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800, - 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c, - 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c, - 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100, - 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054, - 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c, - 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0, - 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825, - 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, - 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000, - 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, - 0x8e63fff4, 0x00431021, 
0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021, - 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, - 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008, - 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, - 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003, - 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, - 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b, - 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98, - 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000, - 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, - 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028, - 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, - 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000, - 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821, - 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, - 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, - 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014, - 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, - 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a, - 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400, - 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010, - 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e, - 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800, - 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000, - 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000, - 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246, - 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff, - 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821, - 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000, - 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9, - 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc, - 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000, - 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a, - 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286, - 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023, - 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c, - 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010, - 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400, - 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024, - 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800, - 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800, - 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021, - 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8, - 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021, - 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8, - 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60, - 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, - 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000, - 0x3c040800, 0x0e00063b, 
0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800, - 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021, - 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021, - 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002, - 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000, - 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800, - 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc, - 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50, - 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025, - 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800, - 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f, - 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40, - 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, - 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, - 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000, - 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008, - 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02, - 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02, - 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, - 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000, - 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000, - 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008, - 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2, - 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402, - 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4, - 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023, - 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a, - 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004, - 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400, - 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4, - 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800, - 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4, - 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800, - 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4, - 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821, - 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800, - 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6, - 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800, - 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021, - 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008, - 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a, - 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, - 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c, - 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb, - 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821, - 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021, - 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006, - 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, - 0x0109102a, 0x1440fff3, 
0x00000000, 0x30820001, 0x10400005, 0x00061c02, - 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021, - 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081, - 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800, - 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800, - 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a, - 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02, - 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821, - 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023, - 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff, - 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042, - 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, - 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, - 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, - 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, - 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, - 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821, - 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800, - 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043, - 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021, - 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, - 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800, - 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff, - 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, - 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007, - 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402, - 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff, - 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021, - 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff, - 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005, - 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800, - 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4, - 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b, - 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4, - 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800, - 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, - 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000, - 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac, - 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022, - 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, - 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0, - 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021, - 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000, - 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc, - 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005, - 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080, - 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800, - 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014, - 0x96040008, 0x3242ffff, 
0x00821021, 0x0282102a, 0x14400002, 0x02b22823, - 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, - 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010, - 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5, - 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, - 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, - 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c, - 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005, - 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800, - 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500, - 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023, - 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821, - 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000, - 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021, - 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006, - 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0, - 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006, - 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905, - 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860, - 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab, - 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff, - 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, - 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038, - 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020, - 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450, - 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003, - 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff, - 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002, - 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f, - 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000, - 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820, - 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4, - 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, - 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, - 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, - 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002, - 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff, - 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8, - 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438, - 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800, - 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800, - 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000, - 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000, - 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021, - 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, - 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, - 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b, - 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02, - 0x0083102b, 0x1040fffc, 
0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, - 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, - 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff, - 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, - 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651, - 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, - 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0, - 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, - 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, - 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000, - 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800, - 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b, - 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010, - 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001, - 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800, - 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000, - 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008, - 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, - 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010, - 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, -}; - -static const u32 tg3TsoFwRodata[] = { - 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, - 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, - 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000, - 0x00000000, -}; - -static const u32 tg3TsoFwData[] = { - 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, -}; - /* 5705 needs a special version of the TSO firmware. 
*/ -#define TG3_TSO5_FW_RELEASE_MAJOR 0x1 -#define TG3_TSO5_FW_RELASE_MINOR 0x2 -#define TG3_TSO5_FW_RELEASE_FIX 0x0 -#define TG3_TSO5_FW_START_ADDR 0x00010000 -#define TG3_TSO5_FW_TEXT_ADDR 0x00010000 -#define TG3_TSO5_FW_TEXT_LEN 0xe90 -#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90 -#define TG3_TSO5_FW_RODATA_LEN 0x50 -#define TG3_TSO5_FW_DATA_ADDR 0x00010f00 -#define TG3_TSO5_FW_DATA_LEN 0x20 -#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20 -#define TG3_TSO5_FW_SBSS_LEN 0x28 -#define TG3_TSO5_FW_BSS_ADDR 0x00010f50 -#define TG3_TSO5_FW_BSS_LEN 0x88 - -static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { - 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, - 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, - 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, - 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001, - 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001, - 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378, - 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, - 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014, - 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400, - 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000, - 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200, - 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000, - 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, - 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821, - 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, - 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, - 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60, - 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821, - 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000, - 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028, - 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402, - 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014, - 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff, - 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b, - 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004, - 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8, - 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001, - 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021, - 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2, - 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a, - 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, - 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001, - 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001, - 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021, - 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000, - 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c, - 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005, - 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006, - 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c, - 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 
0x00a21821, 0x8c63000c, - 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021, - 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001, - 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b, - 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c, - 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76, - 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c, - 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70, - 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c, - 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72, - 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff, - 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78, - 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78, - 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005, - 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d, - 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005, - 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027, - 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d, - 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff, - 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001, - 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000, - 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a, - 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff, - 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001, - 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200, - 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001, - 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021, - 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, - 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00, - 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001, - 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000, - 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003, - 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001, - 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56, - 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4, - 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64, - 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088, - 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001, - 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57, - 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001, - 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001, - 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000, - 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001, - 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823, - 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001, - 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001, - 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001, - 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021, - 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 
0xa0200f56, 0x8f641008, - 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, - 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001, - 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001, - 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec, - 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000, - 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024, - 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, - 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000, - 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, - 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, - 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001, - 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001, - 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff, - 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c, - 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54, - 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001, - 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, - 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624, - 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, - 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, - 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283, - 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825, - 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003, - 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, - 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c, - 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009, - 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025, - 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008, - 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021, - 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, - 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, - 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014, - 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001, - 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, - 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, - 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020, - 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804, - 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20, - 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315, - 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005, - 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001, - 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001, - 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014, - 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8, - 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000, - 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008, - 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008, - 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 
0x00021c02, 0x0083102b, - 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd, - 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000, - 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025, - 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008, - 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff, - 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008, - 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021, - 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f, - 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600, - 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40, - 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000, - 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, - 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44, - 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003, - 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001, - 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, - 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c, - 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, - 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, - 0x00000000, 0x00000000, 0x00000000, -}; - -static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { - 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, - 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, - 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, - 0x00000000, 0x00000000, 0x00000000, -}; - -static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { - 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, -}; /* tp->lock is held. */ static int tg3_load_tso_firmware(struct tg3 *tp) { struct fw_info info; + const __be32 *fw_data; unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; int err, i; if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) return 0; + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. 
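
The comment above gives the layout of the TSO firmware blob: three big-endian 32-bit words (version, start/load address, full length including BSS) followed by the image data. A minimal standalone C sketch of that header parsing follows; the struct and helper names are illustrative only, ntohl() stands in for the kernel's be32_to_cpu(), and the sample blob in main() is fabricated for demonstration.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() stands in for be32_to_cpu() */

struct tso_fw_hdr {
	uint32_t version;	/* word 0: firmware version numbers      */
	uint32_t base;		/* word 1: start/load address on the NIC */
	uint32_t full_len;	/* word 2: full length, BSS included     */
};

static uint32_t be32_at(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return ntohl(v);
}

/* The 12-byte header is followed by the image that gets copied to
 * 'base'; the on-disk payload is (blob_size - 12) bytes, while
 * full_len also covers BSS that only exists once loaded. */
static int parse_tso_fw(const uint8_t *blob, size_t blob_size,
			struct tso_fw_hdr *hdr,
			const uint8_t **payload, size_t *payload_len)
{
	if (blob_size < 12)
		return -1;

	hdr->version  = be32_at(blob);
	hdr->base     = be32_at(blob + 4);
	hdr->full_len = be32_at(blob + 8);

	/* Same sanity check the driver applies: a "bogus length" is one
	 * shorter than the data actually present in the file. */
	if (hdr->full_len < blob_size - 12)
		return -1;

	*payload = blob + 12;
	*payload_len = blob_size - 12;
	return 0;
}

int main(void)
{
	/* Fabricated 16-byte blob: version 0x100, base 0x08000000,
	 * full length 16, then 4 bytes of payload. */
	const uint8_t blob[16] = {
		0x00, 0x00, 0x01, 0x00,
		0x08, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x10,
		0xde, 0xad, 0xbe, 0xef,
	};
	struct tso_fw_hdr hdr;
	const uint8_t *payload;
	size_t len;

	if (parse_tso_fw(blob, sizeof(blob), &hdr, &payload, &len) == 0)
		printf("base=0x%08x full_len=%u payload=%zu bytes\n",
		       hdr.base, hdr.full_len, len);
	return 0;
}

In the driver itself the same three words are read straight out of tp->fw->data, and the length check in tg3_request_firmware() rejects blobs whose declared full length (BSS included) is shorter than the bytes actually present in the file.
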
*/ + + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - info.text_base = TG3_TSO5_FW_TEXT_ADDR; - info.text_len = TG3_TSO5_FW_TEXT_LEN; - info.text_data = &tg3Tso5FwText[0]; - info.rodata_base = TG3_TSO5_FW_RODATA_ADDR; - info.rodata_len = TG3_TSO5_FW_RODATA_LEN; - info.rodata_data = &tg3Tso5FwRodata[0]; - info.data_base = TG3_TSO5_FW_DATA_ADDR; - info.data_len = TG3_TSO5_FW_DATA_LEN; - info.data_data = &tg3Tso5FwData[0]; cpu_base = RX_CPU_BASE; cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; - cpu_scratch_size = (info.text_len + - info.rodata_len + - info.data_len + - TG3_TSO5_FW_SBSS_LEN + - TG3_TSO5_FW_BSS_LEN); } else { - info.text_base = TG3_TSO_FW_TEXT_ADDR; - info.text_len = TG3_TSO_FW_TEXT_LEN; - info.text_data = &tg3TsoFwText[0]; - info.rodata_base = TG3_TSO_FW_RODATA_ADDR; - info.rodata_len = TG3_TSO_FW_RODATA_LEN; - info.rodata_data = &tg3TsoFwRodata[0]; - info.data_base = TG3_TSO_FW_DATA_ADDR; - info.data_len = TG3_TSO_FW_DATA_LEN; - info.data_data = &tg3TsoFwData[0]; cpu_base = TX_CPU_BASE; cpu_scratch_base = TX_CPU_SCRATCH_BASE; cpu_scratch_size = TX_CPU_SCRATCH_SIZE; @@ -6455,21 +7241,21 @@ static int tg3_load_tso_firmware(struct tg3 *tp) /* Now startup the cpu. */ tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.text_base); + tw32_f(cpu_base + CPU_PC, info.fw_base); for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.text_base) + if (tr32(cpu_base + CPU_PC) == info.fw_base) break; tw32(cpu_base + CPU_STATE, 0xffffffff); tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.text_base); + tw32_f(cpu_base + CPU_PC, info.fw_base); udelay(1000); } if (i >= 5) { printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s " "to set CPU PC, is %08x should be %08x\n", tp->dev->name, tr32(cpu_base + CPU_PC), - info.text_base); + info.fw_base); return -ENODEV; } tw32(cpu_base + CPU_STATE, 0xffffffff); @@ -6478,43 +7264,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp) } -/* tp->lock is held. 
*/ -static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) -{ - u32 addr_high, addr_low; - int i; - - addr_high = ((tp->dev->dev_addr[0] << 8) | - tp->dev->dev_addr[1]); - addr_low = ((tp->dev->dev_addr[2] << 24) | - (tp->dev->dev_addr[3] << 16) | - (tp->dev->dev_addr[4] << 8) | - (tp->dev->dev_addr[5] << 0)); - for (i = 0; i < 4; i++) { - if (i == 1 && skip_mac_1) - continue; - tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - for (i = 0; i < 12; i++) { - tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); - } - } - - addr_high = (tp->dev->dev_addr[0] + - tp->dev->dev_addr[1] + - tp->dev->dev_addr[2] + - tp->dev->dev_addr[3] + - tp->dev->dev_addr[4] + - tp->dev->dev_addr[5]) & - TX_BACKOFF_SEED_MASK; - tw32(MAC_TX_BACKOFF_SEED, addr_high); -} - static int tg3_set_mac_addr(struct net_device *dev, void *p) { struct tg3 *tp = netdev_priv(dev); @@ -6573,24 +7322,188 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, static void __tg3_set_rx_mode(struct net_device *); static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) { - tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); - tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); - tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); - tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); - tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + int i; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_TXCOL_TICKS, 0); + tw32(HOSTCC_TXMAX_FRAMES, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT, 0); } - tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); - tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + + if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_RXCOL_TICKS, 0); + tw32(HOSTCC_RXMAX_FRAMES, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT, 0); + } + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { u32 val = ec->stats_block_coalesce_usecs; + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + if (!netif_carrier_ok(tp->dev)) val = 0; tw32(HOSTCC_STAT_COAL_TICKS, val); } + + for (i = 0; i < tp->irq_cnt - 1; i++) { + u32 reg; + + reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->rx_coalesce_usecs); + reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames); + reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames_irq); + + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); + 
tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk, txrcb, rxrcb, limit; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* Disable all transmit rings but the first. */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + + /* Disable all receive return rings but the first. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; + else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Disable interrupts */ + tw32_mailbox_f(tp->napi[0].int_mbox, 1); + + /* Zero mailbox registers. */ + if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { + for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { + tp->napi[i].tx_prod = 0; + tp->napi[i].tx_cons = 0; + tw32_mailbox(tp->napi[i].prodmbox, 0); + tw32_rx_mbox(tp->napi[i].consmbox, 0); + tw32_mailbox_f(tp->napi[i].int_mbox, 1); + } + } else { + tp->napi[0].tx_prod = 0; + tp->napi[0].tx_cons = 0; + tw32_mailbox(tp->napi[0].prodmbox, 0); + tw32_rx_mbox(tp->napi[0].consmbox, 0); + } + + /* Make sure the NIC-based send BD rings are disabled. */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < 16; i++) + tw32_tx_mbox(mbox + i * 8, 0); + } + + txrcb = NIC_SRAM_SEND_RCB; + rxrcb = NIC_SRAM_RCV_RET_RCB; + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + /* Set status block DMA address */ + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tnapi->status_mapping >> 32)); + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tnapi->status_mapping & 0xffffffff)); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + if (tnapi->rx_rcb) { + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (TG3_RX_RCB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + rxrcb += TG3_BDINFO_SIZE; + } + + stblk = HOSTCC_STATBLCK_RING1; + + for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { + u64 mapping = (u64)tnapi->status_mapping; + tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); + tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + + /* Clear status block in ram. 
*/ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (TG3_RX_RCB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + + stblk += 8; + rxrcb += TG3_BDINFO_SIZE; + } } /* tp->lock is held. */ @@ -6598,6 +7511,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) { u32 val, rdmac_mode; int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; tg3_disable_ints(tp); @@ -6609,7 +7523,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_abort_hw(tp, 1); } - if (reset_phy) + if (reset_phy && + !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) tg3_phy_reset(tp); err = tg3_chip_reset(tp); @@ -6618,8 +7533,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_write_sig_legacy(tp, RESET_KIND_INIT); - if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) { + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { val = tr32(TG3_CPMU_CTRL); val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); tw32(TG3_CPMU_CTRL, val); @@ -6640,6 +7554,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3_CPMU_HST_ACC, val); } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; + val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | + PCIE_PWR_MGMT_L1_THRESH_4MS; + tw32(PCIE_PWR_MGMT_THRESH, val); + + val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; + tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); + + tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); + + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + /* This works around an issue with Athlon chipsets on * B3 tigon3 silicon. This bit has no effect on any * other revision. But do not set this on PCI Express @@ -6684,8 +7613,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + val = tr32(TG3PCI_DMA_RW_CTRL) & + ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { /* This value is determined during the probe time DMA * engine test, tg3_test_dma. */ @@ -6731,11 +7665,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { int fw_len; - fw_len = (TG3_TSO5_FW_TEXT_LEN + - TG3_TSO5_FW_RODATA_LEN + - TG3_TSO5_FW_DATA_LEN + - TG3_TSO5_FW_SBSS_LEN + - TG3_TSO5_FW_BSS_LEN); + fw_len = tp->fw_len; fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); @@ -6809,99 +7739,74 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) * configurable. 
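
The comment closing above describes the per-ring BDINFO control blocks that the code below (and tg3_set_bdinfo() in tg3_rings_reset() earlier) programs: a host DMA address split across a high/low register pair, a maxlen/flags word, and the NIC SRAM address of the descriptors. A standalone sketch of that register sequence, assuming made-up register offsets and a printf() stand-in for tw32():

#include <stdint.h>
#include <stdio.h>

/* Illustrative field offsets within one BDINFO block; the driver's
 * real values are the TG3_BDINFO_* constants in tg3.h. */
#define BDINFO_HOST_ADDR_HIGH	0x0
#define BDINFO_HOST_ADDR_LOW	0x4
#define BDINFO_MAXLEN_FLAGS	0x8
#define BDINFO_NIC_ADDR		0xc

/* Stand-in for tw32(); the real driver writes memory-mapped registers. */
static void write_reg32(uint32_t reg, uint32_t val)
{
	printf("reg 0x%05x <- 0x%08x\n", reg, val);
}

/* Program one ring control block: the host ring's DMA address split
 * into high/low 32-bit words, a (max length << shift) | flags word,
 * and the NIC SRAM address of the descriptors. */
static void set_bdinfo(uint32_t bdinfo_base, uint64_t ring_dma,
		       uint32_t maxlen_flags, uint32_t nic_addr)
{
	write_reg32(bdinfo_base + BDINFO_HOST_ADDR_HIGH,
		    (uint32_t)(ring_dma >> 32));
	write_reg32(bdinfo_base + BDINFO_HOST_ADDR_LOW,
		    (uint32_t)(ring_dma & 0xffffffff));
	write_reg32(bdinfo_base + BDINFO_MAXLEN_FLAGS, maxlen_flags);
	write_reg32(bdinfo_base + BDINFO_NIC_ADDR, nic_addr);
}

int main(void)
{
	/* Example values only: a 512-entry ring with the max length in
	 * the upper half-word of MAXLEN_FLAGS. */
	set_bdinfo(0x2450, 0x00000001dead0000ULL, 512u << 16, 0x6000);
	return 0;
}
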
*/ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->rx_std_mapping >> 32)); + ((u64) tpr->rx_std_mapping >> 32)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->rx_std_mapping & 0xffffffff)); - tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, - NIC_SRAM_RX_BUFFER_DESC); - - /* Don't even try to program the JUMBO/MINI buffer descriptor - * configs on 5705. - */ - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { - tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, - RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT); - } else { - tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, - RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); + ((u64) tpr->rx_std_mapping & 0xffffffff)); + if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); + /* Disable the mini ring */ + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. + */ + if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { /* Setup replenish threshold. */ tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->rx_jumbo_mapping >> 32)); + ((u64) tpr->rx_jmb_mapping >> 32)); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->rx_jumbo_mapping & 0xffffffff)); + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, - RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, - NIC_SRAM_RX_JUMBO_BUFFER_DESC); + (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | + BDINFO_FLAGS_USE_EXT_RECV); + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); } - } - - /* There is only one send ring on 5705/5750, no need to explicitly - * disable the others. - */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - /* Clear out send RCB ring in SRAM. */ - for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE) - tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - } - - tp->tx_prod = 0; - tp->tx_cons = 0; - tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); - tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); - - tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, - tp->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | + (RX_STD_MAX_SIZE << 2); + else + val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; - /* There is only one receive return ring on 5705/5750, no need - * to explicitly disable the others. 
- */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; - i += TG3_BDINFO_SIZE) { - tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - } - } + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); - tp->rx_rcb_ptr = 0; - tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0); + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); - tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB, - tp->rx_rcb_mapping, - (TG3_RX_RCB_RING_SIZE(tp) << - BDINFO_FLAGS_MAXLEN_SHIFT), - 0); + tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? + tp->rx_jumbo_pending : 0; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); - tp->rx_std_ptr = tp->rx_pending; - tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, - tp->rx_std_ptr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + tw32(STD_REPLENISH_LWM, 32); + tw32(JMB_REPLENISH_LWM, 16); + } - tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? - tp->rx_jumbo_pending : 0; - tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, - tp->rx_jumbo_ptr); + tg3_rings_reset(tp); /* Initialize MAC address and backoff seed. */ __tg3_set_mac_addr(tp, 0); /* MTU + ethernet header + FCS + optional VLAN tag */ - tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8); + tw32(MAC_RX_MTU_SIZE, + tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); /* The slot time is changed by tg3_setup_phy if we * run at gigabit with half duplex. @@ -6924,7 +7829,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | RDMAC_MODE_LNGREAD_ENAB); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | RDMAC_MODE_MBUF_RBD_CRPT_ENAB | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; @@ -6946,7 +7853,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) - rdmac_mode |= (1 << 27); + rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; + + if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; /* Receive/send statistics. */ if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { @@ -6977,12 +7889,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) __tg3_set_coalesce(tp, &tp->coal); - /* set status block DMA address */ - tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->status_mapping >> 32)); - tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->status_mapping & 0xffffffff)); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { /* Status/statistics block address. 
See tg3_timer, * the tg3_periodic_fetch_stats call there, and @@ -6993,7 +7899,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tp->stats_mapping & 0xffffffff)); tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } } tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); @@ -7003,15 +7918,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); - /* Clear statistics/status block in chip, and status block in ram. */ - for (i = NIC_SRAM_STATS_BLK; - i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; - i += sizeof(u32)) { - tg3_write_mem(tp, i, 0); - udelay(40); - } - memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; /* reset to prevent losing 1st rx packet intermittently */ @@ -7019,7 +7925,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(10); } - tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + else + tp->mac_mode = 0; + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && @@ -7059,8 +7969,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); - tp->last_tag = 0; + if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { + val = tr32(MSGINT_MODE); + val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + tw32(MSGINT_MODE, val); + } if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); @@ -7077,7 +7990,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { - if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) && + if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { /* nothing */ @@ -7089,11 +8002,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } /* Enable host coalescing bug fix */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)) - val |= (1 << 29); + if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + val |= WDMAC_MODE_STATUS_TAG_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + val |= WDMAC_MODE_BURST_ALL_DATA; tw32_f(WDMAC_MODE, val); udelay(40); @@ -7133,7 +8046,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); - tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); + val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; + if (tp->tg3_flags3 
& TG3_FLG3_ENABLE_TSS) + val |= SNDBDI_MODE_MULTI_TXQ_EN; + tw32(SNDBDI_MODE, val); tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { @@ -7152,25 +8068,49 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(MAC_TX_MODE, tp->tx_mode); udelay(100); + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { + u32 reg = MAC_RSS_INDIR_TBL_0; + u8 *ent = (u8 *)&val; + + /* Setup the indirection table */ + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { + int idx = i % sizeof(val); + + ent[idx] = i % (tp->irq_cnt - 1); + if (idx == sizeof(val) - 1) { + tw32(reg, val); + reg += 4; + } + } + + /* Setup the "secret" hash key. */ + tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); + tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); + tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); + tw32(MAC_RSS_HASH_KEY_3, 0x36621985); + tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); + tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); + tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); + tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); + tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); + tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + } + tp->rx_mode = RX_MODE_ENABLE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) + tp->rx_mode |= RX_MODE_RSS_ENABLE | + RX_MODE_RSS_ITBL_HASH_BITS_7 | + RX_MODE_RSS_IPV6_HASH_EN | + RX_MODE_RSS_TCP_IPV6_HASH_EN | + RX_MODE_RSS_IPV4_HASH_EN | + RX_MODE_RSS_TCP_IPV4_HASH_EN; + tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); - if (tp->link_config.phy_is_low_power) { - tp->link_config.phy_is_low_power = 0; - tp->link_config.speed = tp->link_config.orig_speed; - tp->link_config.duplex = tp->link_config.orig_duplex; - tp->link_config.autoneg = tp->link_config.orig_autoneg; - } - - tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - tw32(MAC_LED_CTRL, tp->led_ctrl); tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); @@ -7217,19 +8157,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); } - err = tg3_setup_phy(tp, 0); - if (err) - return err; + if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { + if (tp->link_config.phy_is_low_power) { + tp->link_config.phy_is_low_power = 0; + tp->link_config.speed = tp->link_config.orig_speed; + tp->link_config.duplex = tp->link_config.orig_duplex; + tp->link_config.autoneg = tp->link_config.orig_autoneg; + } - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { - u32 tmp; + err = tg3_setup_phy(tp, 0); + if (err) + return err; - /* Clear CRC stats. */ - if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { - tg3_writephy(tp, MII_TG3_TEST1, - tmp | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, 0x14, &tmp); + if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { + u32 tmp; + + /* Clear CRC stats. */ + if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { + tg3_writephy(tp, MII_TG3_TEST1, + tmp | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, 0x14, &tmp); + } } } @@ -7282,7 +8231,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) default: break; - }; + } if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) /* Write our heartbeat update interval to APE. */ @@ -7299,21 +8248,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) */ static int tg3_init_hw(struct tg3 *tp, int reset_phy) { - int err; - - /* Force the chip into D0. 
*/ - err = tg3_set_power_state(tp, PCI_D0); - if (err) - goto out; - tg3_switch_clocks(tp); tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); - err = tg3_reset_hw(tp, reset_phy); - -out: - return err; + return tg3_reset_hw(tp, reset_phy); } #define TG3_STAT_ADD32(PSTAT, REG) \ @@ -7378,12 +8317,12 @@ static void tg3_timer(unsigned long __opaque) * IRQ status the mailbox/status_block protocol the chip * uses with the cpu is race prone. */ - if (tp->hw_status->status & SD_STATUS_UPDATED) { + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); } else { tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); } if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { @@ -7462,9 +8401,8 @@ static void tg3_timer(unsigned long __opaque) * resets. */ if (!--tp->asf_counter) { - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { - u32 val; - + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, @@ -7472,9 +8410,8 @@ static void tg3_timer(unsigned long __opaque) tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); /* 5 seconds timeout */ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); - val = tr32(GRC_RX_CPU_EVENT); - val |= GRC_RX_CPU_DRIVER_EVENT; - tw32_f(GRC_RX_CPU_EVENT, val); + + tg3_generate_fw_event(tp); } tp->asf_counter = tp->asf_multiplier; } @@ -7486,13 +8423,22 @@ restart_timer: add_timer(&tp->timer); } -static int tg3_request_irq(struct tg3 *tp) +static int tg3_request_irq(struct tg3 *tp, int irq_num) { irq_handler_t fn; unsigned long flags; - struct net_device *dev = tp->dev; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { fn = tg3_msi; if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) fn = tg3_msi_1shot; @@ -7503,37 +8449,50 @@ static int tg3_request_irq(struct tg3 *tp) fn = tg3_interrupt_tagged; flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; } - return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); } static int tg3_test_interrupt(struct tg3 *tp) { + struct tg3_napi *tnapi = &tp->napi[0]; struct net_device *dev = tp->dev; int err, i, intr_ok = 0; + u32 val; if (!netif_running(dev)) return -ENODEV; tg3_disable_ints(tp); - free_irq(tp->pdev->irq, dev); + free_irq(tnapi->irq_vec, tnapi); + + /* + * Turn off MSI one shot mode. Otherwise this test has no + * observable way to know whether the interrupt was delivered. 
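
The comment above explains why one-shot MSI is temporarily disabled: the interrupt test that follows only works if the interrupt leaves observable traces. The test itself is fire-and-poll; a condensed standalone sketch of that shape, with simulated register reads and an illustrative bit value in place of the real MISC_HOST_CTRL mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MISC_HOST_CTRL_MASK_PCI_INT	0x2	/* illustrative bit value */

/* Simulated register reads; here an interrupt "shows up" in the
 * mailbox on the third poll. */
static uint32_t read_mbox(int iteration)
{
	return iteration >= 2 ? 1 : 0;
}

static uint32_t read_misc_host_ctrl(int iteration)
{
	(void)iteration;
	return 0;
}

/* Poll a handful of times for evidence that the test interrupt fired:
 * either the interrupt mailbox went non-zero or the PCI interrupt got
 * masked.  This mirrors the loop structure in tg3_test_interrupt(). */
static bool test_interrupt_delivered(void)
{
	for (int i = 0; i < 5; i++) {
		if (read_mbox(i) != 0 ||
		    (read_misc_host_ctrl(i) & MISC_HOST_CTRL_MASK_PCI_INT))
			return true;
		usleep(10 * 1000);	/* short pause between polls */
	}
	return false;
}

int main(void)
{
	printf("delivered: %s\n", test_interrupt_delivered() ? "yes" : "no");
	return 0;
}
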
+ */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } - err = request_irq(tp->pdev->irq, tg3_test_isr, - IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); if (err) return err; - tp->hw_status->status &= ~SD_STATUS_UPDATED; + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; tg3_enable_ints(tp); tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - HOSTCC_MODE_NOW); + tnapi->coal_now); for (i = 0; i < 5; i++) { u32 int_mbox, misc_host_ctrl; - int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + - TG3_64BIT_REG_LOW); + int_mbox = tr32_mailbox(tnapi->int_mbox); misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); if ((int_mbox != 0) || @@ -7547,15 +8506,23 @@ static int tg3_test_interrupt(struct tg3 *tp) tg3_disable_ints(tp); - free_irq(tp->pdev->irq, dev); + free_irq(tnapi->irq_vec, tnapi); - err = tg3_request_irq(tp); + err = tg3_request_irq(tp, 0); if (err) return err; - if (intr_ok) + if (intr_ok) { + /* Reenable MSI one shot mode. */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } return 0; + } return -EIO; } @@ -7565,7 +8532,6 @@ static int tg3_test_interrupt(struct tg3 *tp) */ static int tg3_test_msi(struct tg3 *tp) { - struct net_device *dev = tp->dev; int err; u16 pci_cmd; @@ -7596,12 +8562,13 @@ static int tg3_test_msi(struct tg3 *tp) "the PCI maintainer and include system chipset information.\n", tp->dev->name); - free_irq(tp->pdev->irq, dev); + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + pci_disable_msi(tp->pdev); tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - err = tg3_request_irq(tp); + err = tg3_request_irq(tp, 0); if (err) return err; @@ -7616,65 +8583,192 @@ static int tg3_test_msi(struct tg3 *tp) tg3_full_unlock(tp); if (err) - free_irq(tp->pdev->irq, dev); + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); return err; } +static int tg3_request_firmware(struct tg3 *tp) +{ + const __be32 *fw_data; + + if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { + printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", + tp->dev->name, tp->fw_needed); + return -ENOENT; + } + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + * start address and _full_ length including BSS sections + * (which must be longer than the actual data, of course + */ + + tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ + if (tp->fw_len < (tp->fw->size - 12)) { + printk(KERN_ERR "%s: bogus length %d in \"%s\"\n", + tp->dev->name, tp->fw_len, tp->fw_needed); + release_firmware(tp->fw); + tp->fw = NULL; + return -EINVAL; + } + + /* We no longer need firmware; we have it. */ + tp->fw_needed = NULL; + return 0; +} + +static bool tg3_enable_msix(struct tg3 *tp) +{ + int i, rc, cpus = num_online_cpus(); + struct msix_entry msix_ent[tp->irq_max]; + + if (cpus == 1) + /* Just fallback to the simpler MSI mode. */ + return false; + + /* + * We want as many rx rings enabled as there are cpus. + * The first MSIX vector only deals with link interrupts, etc, + * so we add one to the number of vectors we are requesting. 
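
The vector-count rule stated in the comment above (one rx ring per online CPU, plus a dedicated vector for link and management events, capped by the device's maximum) can be summarised in a few lines; the helper name and the example numbers below are illustrative:

#include <stdio.h>

/* One rx ring per online CPU, plus one vector reserved for link and
 * management interrupts, capped at what the device supports. */
static unsigned int rx_vector_count(unsigned int online_cpus,
				    unsigned int irq_max)
{
	unsigned int want = online_cpus + 1;

	return want < irq_max ? want : irq_max;
}

int main(void)
{
	printf("%u\n", rx_vector_count(4, 5));	/* 4 cores  -> 5 vectors   */
	printf("%u\n", rx_vector_count(16, 5));	/* 16 cores -> capped at 5 */
	return 0;
}

If pci_enable_msix() later grants fewer vectors than requested, the code below retries with the granted count as long as it stays at or above TG3_RSS_MIN_NUM_MSIX_VECS, and records the final count in tp->irq_cnt.
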
+ */ + tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); + + for (i = 0; i < tp->irq_max; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); + if (rc != 0) { + if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) + return false; + if (pci_enable_msix(tp->pdev, msix_ent, rc)) + return false; + printk(KERN_NOTICE + "%s: Requested %d MSI-X vectors, received %d\n", + tp->dev->name, tp->irq_cnt, rc); + tp->irq_cnt = rc; + } + + tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].irq_vec = msix_ent[i].vector; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; + tp->dev->real_num_tx_queues = tp->irq_cnt - 1; + } else + tp->dev->real_num_tx_queues = 1; + + return true; +} + +static void tg3_ints_init(struct tg3 *tp) +{ + if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && + !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case. + */ + printk(KERN_WARNING PFX "%s: MSI without TAGGED? " + "Not using MSI.\n", tp->dev->name); + goto defcfg; + } + + if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) + tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; + else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && + pci_enable_msi(tp->pdev) == 0) + tp->tg3_flags2 |= TG3_FLG2_USING_MSI; + + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { + u32 msi_mode = tr32(MSGINT_MODE); + if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + msi_mode |= MSGINT_MODE_MULTIVEC_EN; + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } +defcfg: + if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + tp->dev->real_num_tx_queues = 1; + } +} + +static void tg3_ints_fini(struct tg3 *tp) +{ + if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + pci_disable_msix(tp->pdev); + else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) + pci_disable_msi(tp->pdev); + tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; + tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS; +} + static int tg3_open(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - int err; + int i, err; - netif_carrier_off(tp->dev); + if (tp->fw_needed) { + err = tg3_request_firmware(tp); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (err) + return err; + } else if (err) { + printk(KERN_WARNING "%s: TSO capability disabled.\n", + tp->dev->name); + tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; + } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + printk(KERN_NOTICE "%s: TSO capability restored.\n", + tp->dev->name); + tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; + } + } - tg3_full_lock(tp, 0); + netif_carrier_off(tp->dev); err = tg3_set_power_state(tp, PCI_D0); - if (err) { - tg3_full_unlock(tp); + if (err) return err; - } + + tg3_full_lock(tp, 0); tg3_disable_ints(tp); tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; tg3_full_unlock(tp); + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + /* The placement of this call is tied * to the setup and use of Host TX descriptors. */ err = tg3_alloc_consistent(tp); if (err) - return err; - - if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) { - /* All MSI supporting chips should support tagged - * status. Assert that this is the case. - */ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { - printk(KERN_WARNING PFX "%s: MSI without TAGGED? 
" - "Not using MSI.\n", tp->dev->name); - } else if (pci_enable_msi(tp->pdev) == 0) { - u32 msi_mode; + goto err_out1; - msi_mode = tr32(MSGINT_MODE); - tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); - tp->tg3_flags2 |= TG3_FLG2_USING_MSI; - } - } - err = tg3_request_irq(tp); + tg3_napi_enable(tp); - if (err) { - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) + free_irq(tnapi->irq_vec, tnapi); + break; } - tg3_free_consistent(tp); - return err; } - napi_enable(&tp->napi); + if (err) + goto err_out2; tg3_full_lock(tp, 0); @@ -7702,48 +8796,34 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); - if (err) { - napi_disable(&tp->napi); - free_irq(tp->pdev->irq, dev); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - } - tg3_free_consistent(tp); - return err; - } + if (err) + goto err_out3; if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { err = tg3_test_msi(tp); if (err) { tg3_full_lock(tp, 0); - - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - } tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); - tg3_free_consistent(tp); - tg3_full_unlock(tp); - napi_disable(&tp->napi); - - return err; + goto err_out2; } - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { - u32 val = tr32(PCIE_TRANSACTION_CFG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && + (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { + u32 val = tr32(PCIE_TRANSACTION_CFG); - tw32(PCIE_TRANSACTION_CFG, - val | PCIE_TRANS_CFG_1SHOT_MSI); - } + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); } } + tg3_phy_start(tp); + tg3_full_lock(tp, 0); add_timer(&tp->timer); @@ -7752,9 +8832,23 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; + +err_out3: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + +err_out2: + tg3_napi_disable(tp); + tg3_free_consistent(tp); + +err_out1: + tg3_ints_fini(tp); + return err; } #if 0 @@ -7763,6 +8857,7 @@ static int tg3_open(struct net_device *dev) u32 val32, val32_2, val32_3, val32_4, val32_5; u16 val16; int i; + struct tg3_hw_status *sblk = tp->napi[0]->hw_status; pci_read_config_word(tp->pdev, PCI_STATUS, &val16); pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); @@ -7915,14 +9010,15 @@ static int tg3_open(struct net_device *dev) val32, val32_2, val32_3, val32_4, val32_5); /* SW status block */ - printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", - tp->hw_status->status, - tp->hw_status->status_tag, - tp->hw_status->rx_jumbo_consumer, - tp->hw_status->rx_consumer, - tp->hw_status->rx_mini_consumer, - tp->hw_status->idx[0].rx_producer, - tp->hw_status->idx[0].tx_consumer); + printk(KERN_DEBUG + "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + sblk->status, + sblk->status_tag, + sblk->rx_jumbo_consumer, + sblk->rx_consumer, + sblk->rx_mini_consumer, + sblk->idx[0].rx_producer, + sblk->idx[0].tx_consumer); /* SW statistics block */ printk("DEBUG: Host statistics block 
[%08x:%08x:%08x:%08x]\n", @@ -7990,15 +9086,18 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); static int tg3_close(struct net_device *dev) { + int i; struct tg3 *tp = netdev_priv(dev); - napi_disable(&tp->napi); + tg3_napi_disable(tp); cancel_work_sync(&tp->reset_task); - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); del_timer_sync(&tp->timer); + tg3_phy_stop(tp); + tg3_full_lock(tp, 1); #if 0 tg3_dump_state(tp); @@ -8012,12 +9111,13 @@ static int tg3_close(struct net_device *dev) tg3_full_unlock(tp); - free_irq(tp->pdev->irq, dev); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); } + tg3_ints_fini(tp); + memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), sizeof(tp->net_stats_prev)); memcpy(&tp->estats_prev, tg3_get_estats(tp), @@ -8044,6 +9144,11 @@ static inline unsigned long get_stat64(tg3_stat64_t *val) return ret; } +static inline u64 get_estat64(tg3_stat64_t *val) +{ + return ((u64)val->high << 32) | ((u64)val->low); +} + static unsigned long calc_crc_errors(struct tg3 *tp) { struct tg3_hw_stats *hw_stats = tp->hw_stats; @@ -8072,7 +9177,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp) #define ESTAT_ADD(member) \ estats->member = old_estats->member + \ - get_stat64(&hw_stats->member) + get_estat64(&hw_stats->member) static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) { @@ -8419,17 +9524,16 @@ static int tg3_get_eeprom_len(struct net_device *dev) return tp->nvram_size; } -static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); -static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val); -static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val); - static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct tg3 *tp = netdev_priv(dev); int ret; u8 *pd; u32 i, offset, len, b_offset, b_count; - __le32 val; + __be32 val; + + if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) + return -EINVAL; if (tp->link_config.phy_is_low_power) return -EAGAIN; @@ -8448,7 +9552,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, /* i.e. 
offset=1 len=2 */ b_count = len; } - ret = tg3_nvram_read_le(tp, offset-b_offset, &val); + ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); if (ret) return ret; memcpy(data, ((char*)&val) + b_offset, b_count); @@ -8460,7 +9564,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, /* read bytes upto the last 4 byte boundary */ pd = &data[eeprom->len]; for (i = 0; i < (len - (len & 3)); i += 4) { - ret = tg3_nvram_read_le(tp, offset + i, &val); + ret = tg3_nvram_read_be32(tp, offset + i, &val); if (ret) { eeprom->len += i; return ret; @@ -8474,7 +9578,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, pd = &data[eeprom->len]; b_count = len & 3; b_offset = offset + len - b_count; - ret = tg3_nvram_read_le(tp, b_offset, &val); + ret = tg3_nvram_read_be32(tp, b_offset, &val); if (ret) return ret; memcpy(pd, &val, b_count); @@ -8491,12 +9595,13 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, int ret; u32 offset, len, b_offset, odd_len; u8 *buf; - __le32 start, end; + __be32 start, end; if (tp->link_config.phy_is_low_power) return -EAGAIN; - if (eeprom->magic != TG3_EEPROM_MAGIC) + if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) return -EINVAL; offset = eeprom->offset; @@ -8504,7 +9609,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if ((b_offset = (offset & 3))) { /* adjustments to start on required 4 byte boundary */ - ret = tg3_nvram_read_le(tp, offset-b_offset, &start); + ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); if (ret) return ret; len += b_offset; @@ -8518,7 +9623,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, /* adjustments to end on required 4 byte boundary */ odd_len = 1; len = (len + 3) & ~3; - ret = tg3_nvram_read_le(tp, offset+len-4, &end); + ret = tg3_nvram_read_be32(tp, offset+len-4, &end); if (ret) return ret; } @@ -8545,7 +9650,15 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - struct tg3 *tp = netdev_priv(dev); + struct tg3 *tp = netdev_priv(dev); + + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + struct phy_device *phydev; + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_gset(phydev, cmd); + } cmd->supported = (SUPPORTED_Autoneg); @@ -8570,8 +9683,8 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->speed = tp->link_config.active_speed; cmd->duplex = tp->link_config.active_duplex; } - cmd->phy_address = PHY_ADDR; - cmd->transceiver = 0; + cmd->phy_address = tp->phy_addr; + cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = tp->link_config.autoneg; cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; @@ -8582,26 +9695,66 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tg3 *tp = netdev_priv(dev); - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { - /* These are the only valid advertisement bits allowed. */ - if (cmd->autoneg == AUTONEG_ENABLE && - (cmd->advertising & ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | - ADVERTISED_FIBRE))) - return -EINVAL; - /* Fiber can only do SPEED_1000. */ - else if ((cmd->autoneg != AUTONEG_ENABLE) && - (cmd->speed != SPEED_1000)) - return -EINVAL; - /* Copper cannot force SPEED_1000. 
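
The replacement tg3_set_settings() logic beginning just below validates an autoneg request by building the set of advertisement bits the port can actually honour (gigabit only when the NIC is not 10/100-only, copper modes only when it is not a SerDes port) and rejecting anything outside it. A standalone sketch of that shape; the ADV_* bit values are invented for illustration, the real ones are ethtool's ADVERTISED_* constants, and the two pause bits are collapsed into one here for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ADV_AUTONEG	(1u << 0)
#define ADV_PAUSE	(1u << 1)
#define ADV_10_HALF	(1u << 2)
#define ADV_10_FULL	(1u << 3)
#define ADV_100_HALF	(1u << 4)
#define ADV_100_FULL	(1u << 5)
#define ADV_1000_HALF	(1u << 6)
#define ADV_1000_FULL	(1u << 7)
#define ADV_TP		(1u << 8)
#define ADV_FIBRE	(1u << 9)

/* Build the set of advertisement bits this port can honour and reject
 * a request containing anything outside it -- the shape of the new
 * autoneg branch in tg3_set_settings(). */
static bool adv_request_ok(uint32_t requested, bool is_serdes,
			   bool only_10_100)
{
	uint32_t mask = ADV_AUTONEG | ADV_PAUSE;

	if (!only_10_100)
		mask |= ADV_1000_HALF | ADV_1000_FULL;

	if (!is_serdes)
		mask |= ADV_100_HALF | ADV_100_FULL |
			ADV_10_HALF | ADV_10_FULL | ADV_TP;
	else
		mask |= ADV_FIBRE;

	return (requested & ~mask) == 0;
}

int main(void)
{
	/* A copper port may advertise gigabit plus twisted pair ... */
	printf("%d\n", adv_request_ok(ADV_AUTONEG | ADV_1000_FULL | ADV_TP,
				      false, false));
	/* ... but a SerDes port rejects copper-only bits. */
	printf("%d\n", adv_request_ok(ADV_AUTONEG | ADV_TP, true, false));
	return 0;
}
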
*/ - } else if ((cmd->autoneg != AUTONEG_ENABLE) && - (cmd->speed == SPEED_1000)) + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + struct phy_device *phydev; + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_sset(phydev, cmd); + } + + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) return -EINVAL; - else if ((cmd->speed == SPEED_1000) && - (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) + + if (cmd->autoneg == AUTONEG_DISABLE && + cmd->duplex != DUPLEX_FULL && + cmd->duplex != DUPLEX_HALF) return -EINVAL; + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 mask = ADVERTISED_Autoneg | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + + if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) + mask |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + mask |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + mask |= ADVERTISED_FIBRE; + + if (cmd->advertising & ~mask) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + cmd->advertising &= mask; + } else { + if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { + if (cmd->speed != SPEED_1000) + return -EINVAL; + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + if (cmd->speed != SPEED_100 && + cmd->speed != SPEED_10) + return -EINVAL; + } + } + tg3_full_lock(tp, 0); tp->link_config.autoneg = cmd->autoneg; @@ -8614,7 +9767,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) tp->link_config.advertising = 0; tp->link_config.speed = cmd->speed; tp->link_config.duplex = cmd->duplex; - } + } tp->link_config.orig_speed = tp->link_config.speed; tp->link_config.orig_duplex = tp->link_config.duplex; @@ -8642,12 +9795,14 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct tg3 *tp = netdev_priv(dev); - if (tp->tg3_flags & TG3_FLAG_WOL_CAP) + if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && + device_can_wakeup(&tp->pdev->dev)) wol->supported = WAKE_MAGIC; else wol->supported = 0; wol->wolopts = 0; - if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) + if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && + device_can_wakeup(&tp->pdev->dev)) wol->wolopts = WAKE_MAGIC; memset(&wol->sopass, 0, sizeof(wol->sopass)); } @@ -8655,18 +9810,22 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct tg3 *tp = netdev_priv(dev); + struct device *dp = &tp->pdev->dev; if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if ((wol->wolopts & WAKE_MAGIC) && - !(tp->tg3_flags & TG3_FLAG_WOL_CAP)) + !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp))) return -EINVAL; spin_lock_bh(&tp->lock); - if (wol->wolopts & WAKE_MAGIC) + if (wol->wolopts & WAKE_MAGIC) { tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; - else + device_set_wakeup_enable(dp, true); + } else { tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; + device_set_wakeup_enable(dp, false); + } spin_unlock_bh(&tp->lock); return 0; @@ -8693,11 +9852,17 @@ static int tg3_set_tso(struct net_device *dev, u32 value) return -EINVAL; return 0; } - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { + if ((dev->features & NETIF_F_IPV6_CSUM) && + 
((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || + (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { if (value) { dev->features |= NETIF_F_TSO6; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) dev->features |= NETIF_F_TSO_ECN; } else dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); @@ -8708,7 +9873,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value) static int tg3_nway_reset(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - u32 bmcr; int r; if (!netif_running(dev)) @@ -8717,17 +9881,25 @@ static int tg3_nway_reset(struct net_device *dev) if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) return -EINVAL; - spin_lock_bh(&tp->lock); - r = -EINVAL; - tg3_readphy(tp, MII_BMCR, &bmcr); - if (!tg3_readphy(tp, MII_BMCR, &bmcr) && - ((bmcr & BMCR_ANENABLE) || - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { - tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | - BMCR_ANENABLE); - r = 0; + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return -EAGAIN; + r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } else { + u32 bmcr; + + spin_lock_bh(&tp->lock); + r = -EINVAL; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (!tg3_readphy(tp, MII_BMCR, &bmcr) && + ((bmcr & BMCR_ANENABLE) || + (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + r = 0; + } + spin_unlock_bh(&tp->lock); } - spin_unlock_bh(&tp->lock); return r; } @@ -8752,13 +9924,13 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * else ering->rx_jumbo_pending = 0; - ering->tx_pending = tp->tx_pending; + ering->tx_pending = tp->napi[0].tx_pending; } static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0, err = 0; + int i, irq_sync = 0, err = 0; if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || @@ -8769,6 +9941,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e return -EINVAL; if (netif_running(dev)) { + tg3_phy_stop(tp); tg3_netif_stop(tp); irq_sync = 1; } @@ -8781,7 +9954,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tp->rx_pending > 63) tp->rx_pending = 63; tp->rx_jumbo_pending = ering->rx_jumbo_pending; - tp->tx_pending = ering->tx_pending; + + for (i = 0; i < TG3_IRQ_MAX_VECS; i++) + tp->napi[i].tx_pending = ering->tx_pending; if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -8792,6 +9967,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tg3_full_unlock(tp); + if (irq_sync && !err) + tg3_phy_start(tp); + return err; } @@ -8801,12 +9979,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; - if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) + if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) epause->rx_pause = 1; else epause->rx_pause = 0; - if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) + if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) epause->tx_pause = 1; else 
epause->tx_pause = 0; @@ -8815,37 +9993,93 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0, err = 0; + int err = 0; - if (netif_running(dev)) { - tg3_netif_stop(tp); - irq_sync = 1; - } + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return -EAGAIN; + + if (epause->autoneg) { + u32 newadv; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (epause->rx_pause) { + if (epause->tx_pause) + newadv = ADVERTISED_Pause; + else + newadv = ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + u32 oldadv = phydev->advertising & + (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + err = phy_start_aneg(phydev); + } + } else { + tp->link_config.advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + tp->link_config.advertising |= newadv; + } + } else { + if (epause->rx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_RX; + + if (epause->tx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_TX; + + if (netif_running(dev)) + tg3_setup_flow_control(tp, 0, 0); + } + } else { + int irq_sync = 0; + + if (netif_running(dev)) { + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); - tg3_full_lock(tp, irq_sync); + if (epause->autoneg) + tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + else + tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; + if (epause->rx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_RX; + if (epause->tx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_TX; - if (epause->autoneg) - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; - else - tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; - if (epause->rx_pause) - tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX; - else - tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX; - if (epause->tx_pause) - tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX; - else - tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX; + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } - if (netif_running(dev)) { - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - err = tg3_restart_hw(tp, 1); - if (!err) - tg3_netif_start(tp); + tg3_full_unlock(tp); } - tg3_full_unlock(tp); - return err; } @@ -8885,10 +10119,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) return 0; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ethtool_op_set_tx_ipv6_csum(dev, data); else ethtool_op_set_tx_csum(dev, data); @@ -8972,10 +10203,13 @@ static void tg3_get_ethtool_stats (struct net_device *dev, static int tg3_test_nvram(struct tg3 *tp) { u32 csum, magic; - __le32 *buf; + __be32 *buf; int i, j, k, err = 0, size; - if (tg3_nvram_read_swab(tp, 0, &magic) != 0) + if (tp->tg3_flags3 & 
TG3_FLG3_NO_NVRAM) + return 0; + + if (tg3_nvram_read(tp, 0, &magic) != 0) return -EIO; if (magic == TG3_EEPROM_MAGIC) @@ -9009,14 +10243,15 @@ static int tg3_test_nvram(struct tg3 *tp) err = -EIO; for (i = 0, j = 0; i < size; i += 4, j++) { - if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0) + err = tg3_nvram_read_be32(tp, i, &buf[j]); + if (err) break; } if (i < size) goto out; /* Selfboot format */ - magic = swab32(le32_to_cpu(buf[0])); + magic = be32_to_cpu(buf[0]); if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { u8 *buf8 = (u8 *) buf, csum8 = 0; @@ -9045,7 +10280,7 @@ static int tg3_test_nvram(struct tg3 *tp) if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) { u8 data[NVRAM_SELFBOOT_DATA_SIZE]; - u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; u8 *buf8 = (u8 *) buf; /* Separate the parity bits and the data bytes. */ @@ -9088,13 +10323,13 @@ static int tg3_test_nvram(struct tg3 *tp) /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); - if(csum != le32_to_cpu(buf[0x10/4])) + if (csum != be32_to_cpu(buf[0x10/4])) goto out; /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); - if (csum != le32_to_cpu(buf[0xfc/4])) - goto out; + if (csum != be32_to_cpu(buf[0xfc/4])) + goto out; err = 0; @@ -9405,17 +10640,13 @@ static int tg3_test_memory(struct tg3 *tp) int err = 0; int i; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - mem_tbl = mem_tbl_5755; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - mem_tbl = mem_tbl_5906; - else - mem_tbl = mem_tbl_5705; - } else + if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + mem_tbl = mem_tbl_5755; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mem_tbl = mem_tbl_5906; + else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + mem_tbl = mem_tbl_5705; + else mem_tbl = mem_tbl_570x; for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { @@ -9433,12 +10664,23 @@ static int tg3_test_memory(struct tg3 *tp) static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) { u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; - u32 desc_idx; + u32 desc_idx, coal_now; struct sk_buff *skb, *rx_skb; u8 *tx_data; dma_addr_t map; int num_pkts, tx_len, rx_len, i, err; struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; + + if (tp->irq_cnt > 1) { + tnapi = &tp->napi[1]; + rnapi = &tp->napi[1]; + } else { + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + } + coal_now = tnapi->coal_now | rnapi->coal_now; if (loopback_mode == TG3_MAC_LOOPBACK) { /* HW errata - mac loopback fails in some cases on 5780. 
@@ -9460,18 +10702,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) } else if (loopback_mode == TG3_PHY_LOOPBACK) { u32 val; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 phytest; - - if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) { - u32 phy; - - tg3_writephy(tp, MII_TG3_EPHY_TEST, - phytest | MII_TG3_EPHY_SHADOW_EN); - if (!tg3_readphy(tp, 0x1b, &phy)) - tg3_writephy(tp, 0x1b, phy & ~0x20); - tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); - } + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + tg3_phy_fet_toggle_apd(tp, false); val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; } else val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; @@ -9482,8 +10714,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) udelay(40); mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); + if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800); mac_mode |= MAC_MODE_PORT_MODE_MII; } else mac_mode |= MAC_MODE_PORT_MODE_GMII; @@ -9524,37 +10757,40 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) tx_data[i] = (u8) (i & 0xff); map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, map)) { + dev_kfree_skb(skb); + return -EIO; + } tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - HOSTCC_MODE_NOW); + rnapi->coal_now); udelay(10); - rx_start_idx = tp->hw_status->idx[0].rx_producer; + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; num_pkts = 0; - tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1); + tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); - tp->tx_prod++; + tnapi->tx_prod++; num_pkts++; - tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, - tp->tx_prod); - tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); udelay(10); - /* 250 usec to allow enough time on some 10/100 Mbps devices. */ - for (i = 0; i < 25; i++) { + /* 350 usec to allow enough time on some 10/100 Mbps devices. 
*/ + for (i = 0; i < 35; i++) { tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - HOSTCC_MODE_NOW); + coal_now); udelay(10); - tx_idx = tp->hw_status->idx[0].tx_consumer; - rx_idx = tp->hw_status->idx[0].rx_producer; - if ((tx_idx == tp->tx_prod) && + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && (rx_idx == (rx_start_idx + num_pkts))) break; } @@ -9562,13 +10798,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); - if (tx_idx != tp->tx_prod) + if (tx_idx != tnapi->tx_prod) goto out; if (rx_idx != rx_start_idx + num_pkts) goto out; - desc = &tp->rx_rcb[rx_start_idx]; + desc = &rnapi->rx_rcb[rx_start_idx]; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key != RXD_OPAQUE_RING_STD) @@ -9582,9 +10818,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) if (rx_len != tx_len) goto out; - rx_skb = tp->rx_std_buffers[desc_idx].skb; + rx_skb = tpr->rx_std_buffers[desc_idx].skb; - map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); + map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); for (i = 14; i < tx_len; i++) { @@ -9615,8 +10851,11 @@ static int tg3_test_loopback(struct tg3 *tp) if (err) return TG3_LOOPBACK_FAILED; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + /* Turn off gphy autopowerdown. */ + if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + tg3_phy_toggle_apd(tp, false); + + if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { int i; u32 status; @@ -9643,19 +10882,23 @@ static int tg3_test_loopback(struct tg3 *tp) if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) err |= TG3_MAC_LOOPBACK_FAILED; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { tw32(TG3_CPMU_CTRL, cpmuctrl); /* Release the mutex */ tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); } - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { + if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) err |= TG3_PHY_LOOPBACK_FAILED; } + /* Re-enable gphy autopowerdown. 
*/ + if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + return err; } @@ -9678,9 +10921,10 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, data[1] = 1; } if (etest->flags & ETH_TEST_FL_OFFLINE) { - int err, irq_sync = 0; + int err, err2 = 0, irq_sync = 0; if (netif_running(dev)) { + tg3_phy_stop(tp); tg3_netif_stop(tp); irq_sync = 1; } @@ -9721,11 +10965,15 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); if (netif_running(dev)) { tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; - if (!tg3_restart_hw(tp, 1)) + err2 = tg3_restart_hw(tp, 1); + if (!err2) tg3_netif_start(tp); } tg3_full_unlock(tp); + + if (irq_sync && !err2) + tg3_phy_start(tp); } if (tp->link_config.phy_is_low_power) tg3_set_power_state(tp, PCI_D3hot); @@ -9738,9 +10986,17 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct tg3 *tp = netdev_priv(dev); int err; + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + struct phy_device *phydev; + if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_mii_ioctl(phydev, data, cmd); + } + switch(cmd) { case SIOCGMIIPHY: - data->phy_id = PHY_ADDR; + data->phy_id = tp->phy_addr; /* fallthru */ case SIOCGMIIREG: { @@ -9765,9 +11021,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) break; /* We have no PHY */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (tp->link_config.phy_is_low_power) return -EAGAIN; @@ -9789,8 +11042,12 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) { struct tg3 *tp = netdev_priv(dev); - if (netif_running(dev)) - tg3_netif_stop(tp); + if (!netif_running(dev)) { + tp->vlgrp = grp; + return; + } + + tg3_netif_stop(tp); tg3_full_lock(tp, 0); @@ -9799,8 +11056,7 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ __tg3_set_rx_mode(dev); - if (netif_running(dev)) - tg3_netif_start(tp); + tg3_netif_start(tp); tg3_full_unlock(tp); } @@ -9907,7 +11163,7 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp) tp->nvram_size = EEPROM_CHIP_SIZE; - if (tg3_nvram_read_swab(tp, 0, &magic) != 0) + if (tg3_nvram_read(tp, 0, &magic) != 0) return; if ((magic != TG3_EEPROM_MAGIC) && @@ -9923,7 +11179,7 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp) cursize = 0x10; while (cursize < tp->nvram_size) { - if (tg3_nvram_read_swab(tp, cursize, &val) != 0) + if (tg3_nvram_read(tp, cursize, &val) != 0) return; if (val == magic) @@ -9939,7 +11195,8 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) { u32 val; - if (tg3_nvram_read_swab(tp, 0, &val) != 0) + if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || + tg3_nvram_read(tp, 0, &val) != 0) return; /* Selfboot format */ @@ -9950,7 +11207,18 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) if (tg3_nvram_read(tp, 0xf0, &val) == 0) { if (val != 0) { - tp->nvram_size = (val >> 16) * 1024; + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. 
+ * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. The 16-bit + * byteswap then brings the data to CPU endianness. + */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; return; } } @@ -9964,8 +11232,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) nvcfg1 = tr32(NVRAM_CFG1); if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { tp->tg3_flags2 |= TG3_FLG2_FLASH; - } - else { + } else { nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; tw32(NVRAM_CFG1, nvcfg1); } @@ -9973,43 +11240,69 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { - case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; - break; - case FLASH_VENDOR_ATMEL_EEPROM: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_VENDOR_ST: - tp->nvram_jedecnum = JEDEC_ST; - tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_VENDOR_SAIFUN: - tp->nvram_jedecnum = JEDEC_SAIFUN; - tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; - break; - case FLASH_VENDOR_SST_SMALL: - case FLASH_VENDOR_SST_LARGE: - tp->nvram_jedecnum = JEDEC_SST; - tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; - break; + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; } - } - else { + } else { tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; } } +static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) +{ + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; + break; + } +} + static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) { u32 nvcfg1; @@ -10018,51 +11311,31 @@ static void __devinit 
tg3_get_5752_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) - tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; + tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - break; + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + break; } if (tp->tg3_flags2 & TG3_FLG2_FLASH) { - switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { - case FLASH_5752PAGE_SIZE_256: - tp->nvram_pagesize = 256; - break; - case FLASH_5752PAGE_SIZE_512: - tp->nvram_pagesize = 512; - break; - case FLASH_5752PAGE_SIZE_1K: - tp->nvram_pagesize = 1024; - break; - case FLASH_5752PAGE_SIZE_2K: - tp->nvram_pagesize = 2048; - break; - case FLASH_5752PAGE_SIZE_4K: - tp->nvram_pagesize = 4096; - break; - case FLASH_5752PAGE_SIZE_264: - tp->nvram_pagesize = 264; - break; - } - } - else { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { /* For eeprom, set pagesize to maximum eeprom size */ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; @@ -10079,51 +11352,51 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; + tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; protect = 1; } nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; switch (nvcfg1) { - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - case FLASH_5755VENDOR_ATMEL_FLASH_5: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 264; - if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || - nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) - tp->nvram_size = (protect ? 0x3e200 : - TG3_NVRAM_SIZE_512KB); - else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_128KB); - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 256; - if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_128KB); - else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) - tp->nvram_size = (protect ? 
- TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_128KB : - TG3_NVRAM_SIZE_512KB); - break; + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; } } @@ -10134,117 +11407,264 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) nvcfg1 = tr32(NVRAM_CFG1); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } +} + +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + 
tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; + } + } +} + +static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) +{ + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; +} + +static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 264; + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = 
JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 256; + tp->nvram_size = TG3_NVRAM_SIZE_512KB; break; + } + break; + default: + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; + return; } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; } -static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) + +static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) { - u32 nvcfg1, protect = 0; + u32 nvcfg1; nvcfg1 = tr32(NVRAM_CFG1); - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) { - tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; - protect = 1; - } + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; - switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; - tp->nvram_pagesize = 256; + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->nvram_pagesize = 256; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; - } + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= 
TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; - if (protect) { - tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); - } else { - switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_size = TG3_NVRAM_SIZE_2MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE80: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; } + break; + default: + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; + return; } -} -static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) -{ - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; } /* Chips other than 5700/5701 use the NVRAM for fetching info. */ @@ -10280,12 +11700,18 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tg3_get_5755_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_get_5787_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) tg3_get_5761_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_get_57780_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + tg3_get_5717_nvram_info(tp); else tg3_get_nvram_info(tp); @@ -10302,141 +11728,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) } } -static int tg3_nvram_read_using_eeprom(struct tg3 *tp, - u32 offset, u32 *val) -{ - u32 tmp; - int i; - - if (offset > EEPROM_ADDR_ADDR_MASK || - (offset % 4) != 0) - return -EINVAL; - - tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | - EEPROM_ADDR_DEVID_MASK | - EEPROM_ADDR_READ); - tw32(GRC_EEPROM_ADDR, - tmp | - (0 << EEPROM_ADDR_DEVID_SHIFT) | - ((offset << EEPROM_ADDR_ADDR_SHIFT) & - EEPROM_ADDR_ADDR_MASK) | - EEPROM_ADDR_READ | EEPROM_ADDR_START); - - for (i = 0; i < 1000; i++) { - tmp = tr32(GRC_EEPROM_ADDR); - - if (tmp & EEPROM_ADDR_COMPLETE) - break; - msleep(1); - } - if (!(tmp & EEPROM_ADDR_COMPLETE)) - return -EBUSY; - - *val = tr32(GRC_EEPROM_DATA); - return 0; -} - -#define NVRAM_CMD_TIMEOUT 10000 - -static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) -{ - int i; - - 
tw32(NVRAM_CMD, nvram_cmd); - for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { - udelay(10); - if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { - udelay(10); - break; - } - } - if (i == NVRAM_CMD_TIMEOUT) { - return -EBUSY; - } - return 0; -} - -static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) -{ - if ((tp->tg3_flags & TG3_FLAG_NVRAM) && - (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && - (tp->nvram_jedecnum == JEDEC_ATMEL)) - - addr = ((addr / tp->nvram_pagesize) << - ATMEL_AT45DB0X1B_PAGE_POS) + - (addr % tp->nvram_pagesize); - - return addr; -} - -static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) -{ - if ((tp->tg3_flags & TG3_FLAG_NVRAM) && - (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && - (tp->nvram_jedecnum == JEDEC_ATMEL)) - - addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * - tp->nvram_pagesize) + - (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); - - return addr; -} - -static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) -{ - int ret; - - if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) - return tg3_nvram_read_using_eeprom(tp, offset, val); - - offset = tg3_nvram_phys_addr(tp, offset); - - if (offset > NVRAM_ADDR_MSK) - return -EINVAL; - - ret = tg3_nvram_lock(tp); - if (ret) - return ret; - - tg3_enable_nvram_access(tp); - - tw32(NVRAM_ADDR, offset); - ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | - NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); - - if (ret == 0) - *val = swab32(tr32(NVRAM_RDDATA)); - - tg3_disable_nvram_access(tp); - - tg3_nvram_unlock(tp); - - return ret; -} - -static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val) -{ - u32 v; - int res = tg3_nvram_read(tp, offset, &v); - if (!res) - *val = cpu_to_le32(v); - return res; -} - -static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val) -{ - int err; - u32 tmp; - - err = tg3_nvram_read(tp, offset, &tmp); - *val = swab32(tmp); - return err; -} - static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, u32 offset, u32 len, u8 *buf) { @@ -10445,13 +11736,19 @@ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, for (i = 0; i < len; i += 4) { u32 addr; - __le32 data; + __be32 data; addr = offset + i; memcpy(&data, buf + i, 4); - tw32(GRC_EEPROM_DATA, le32_to_cpu(data)); + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). 
+ */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); val = tr32(GRC_EEPROM_ADDR); tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); @@ -10501,8 +11798,9 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, phy_addr = offset & ~pagemask; for (j = 0; j < pagesize; j += 4) { - if ((ret = tg3_nvram_read_le(tp, phy_addr + j, - (__le32 *) (tmp + j)))) + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) break; } if (ret) @@ -10549,7 +11847,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, __be32 data; data = *((__be32 *) (tmp + j)); - /* swab32(le32_to_cpu(data)), actually */ + tw32(NVRAM_WRDATA, be32_to_cpu(data)); tw32(NVRAM_ADDR, phy_addr + j); @@ -10606,11 +11904,8 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, if (i == (len - 4)) nvram_cmd |= NVRAM_CMD_LAST; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -10654,7 +11949,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) tg3_enable_nvram_access(tp); if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) + !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) tw32(NVRAM_WRITE1, 0x406); grc_mode = tr32(GRC_MODE); @@ -10787,13 +12082,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if ((val & VCPU_CFGSHDW_WOL_ENABLE) && (val & VCPU_CFGSHDW_WOL_MAGPKT)) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; - return; + goto done; } tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); if (val == NIC_SRAM_DATA_SIG_MAGIC) { u32 nic_cfg, led_cfg; - u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id; + u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; int eeprom_phy_serdes = 0; tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); @@ -10807,6 +12102,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) (ver > 0) && (ver < 0x100)) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); + if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) eeprom_phy_serdes = 1; @@ -10879,7 +12177,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) LED_CTRL_MODE_PHY_2); break; - }; + } if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && @@ -10906,14 +12204,17 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; } - if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) + + if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && + (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; + if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; - if (tp->tg3_flags & TG3_FLAG_WOL_CAP && - nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) + if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; if (cfg2 & (1 << 17)) @@ -10924,6 +12225,11 @@ static 
void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if (cfg2 & (1 << 18)) tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; + if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && + (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) + tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD; + if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { u32 cfg3; @@ -10931,7 +12237,18 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; } + + if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE) + tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE; + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) + tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) + tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; } +done: + device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); + device_set_wakeup_enable(&tp->pdev->dev, + tp->tg3_flags & TG3_FLAG_WOL_ENABLE); } static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) @@ -10989,8 +12306,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) u32 hw_phy_id, hw_phy_id_masked; int err; + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) + return tg3_phy_init(tp); + /* Reading the PHY ID register can conflict with ASF - * firwmare access to the PHY hardware. + * firmware access to the PHY hardware. */ err = 0; if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || @@ -11113,55 +12433,46 @@ skip_phy_reset: static void __devinit tg3_read_partno(struct tg3 *tp) { - unsigned char vpd_data[256]; + unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ unsigned int i; u32 magic; - if (tg3_nvram_read_swab(tp, 0x0, &magic)) + if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || + tg3_nvram_read(tp, 0x0, &magic)) goto out_not_found; if (magic == TG3_EEPROM_MAGIC) { - for (i = 0; i < 256; i += 4) { + for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { u32 tmp; - if (tg3_nvram_read(tp, 0x100 + i, &tmp)) + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) goto out_not_found; - vpd_data[i + 0] = ((tmp >> 0) & 0xff); - vpd_data[i + 1] = ((tmp >> 8) & 0xff); - vpd_data[i + 2] = ((tmp >> 16) & 0xff); - vpd_data[i + 3] = ((tmp >> 24) & 0xff); + memcpy(&vpd_data[i], &tmp, sizeof(tmp)); } } else { - int vpd_cap; - - vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); - for (i = 0; i < 256; i += 4) { - u32 tmp, j = 0; - __le32 v; - u16 tmp16; - - pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, - i); - while (j++ < 100) { - pci_read_config_word(tp->pdev, vpd_cap + - PCI_VPD_ADDR, &tmp16); - if (tmp16 & 0x8000) - break; - msleep(1); - } - if (!(tmp16 & 0x8000)) + ssize_t cnt; + unsigned int pos = 0, i = 0; + + for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + TG3_NVM_VPD_LEN - pos, + &vpd_data[pos]); + if (cnt == -ETIMEDOUT || -EINTR) + cnt = 0; + else if (cnt < 0) goto out_not_found; - - pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, - &tmp); - v = cpu_to_le32(tmp); - memcpy(&vpd_data[i], &v, 4); } + if (pos != TG3_NVM_VPD_LEN) + goto out_not_found; } /* Now parse and find the part number. 
*/ - for (i = 0; i < 254; ) { + for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) { unsigned char val = vpd_data[i]; unsigned int block_end; @@ -11180,7 +12491,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp) (vpd_data[i + 2] << 8))); i += 3; - if (block_end > 256) + if (block_end > TG3_NVM_VPD_LEN) goto out_not_found; while (i < (block_end - 2)) { @@ -11189,7 +12500,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp) int partno_len = vpd_data[i + 2]; i += 3; - if (partno_len > 24 || (partno_len + i) > 256) + if (partno_len > TG3_BPN_SIZE || + (partno_len + i) > TG3_NVM_VPD_LEN) goto out_not_found; memcpy(tp->board_part_number, @@ -11208,6 +12520,20 @@ static void __devinit tg3_read_partno(struct tg3 *tp) out_not_found: if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) strcpy(tp->board_part_number, "BCM95906"); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + strcpy(tp->board_part_number, "BCM57780"); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + strcpy(tp->board_part_number, "BCM57765"); else strcpy(tp->board_part_number, "none"); } @@ -11216,54 +12542,133 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) { u32 val; - if (tg3_nvram_read_swab(tp, offset, &val) || + if (tg3_nvram_read(tp, offset, &val) || (val & 0xfc000000) != 0x0c000000 || - tg3_nvram_read_swab(tp, offset + 4, &val) || + tg3_nvram_read(tp, offset + 4, &val) || val != 0) return 0; return 1; } -static void __devinit tg3_read_fw_ver(struct tg3 *tp) +static void __devinit tg3_read_bc_ver(struct tg3 *tp) +{ + u32 val, offset, start, ver_offset; + int i; + bool newver = false; + + if (tg3_nvram_read(tp, 0xc, &offset) || + tg3_nvram_read(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (tg3_nvram_read(tp, offset + 4, &val)) + return; + + if (val == 0) + newver = true; + } + + if (newver) { + if (tg3_nvram_read(tp, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset + i, &v)) + return; + + memcpy(tp->fw_ver + i, &v, sizeof(v)); + } + } else { + u32 major, minor; + + if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> + TG3_NVM_BCVER_MAJSFT; + minor = ver_offset & TG3_NVM_BCVER_MINMSK; + snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor); + } +} + +static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) +{ + u32 val, major, minor; + + /* Use native endian representation */ + if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) + return; + + major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> + TG3_NVM_HWSB_CFG1_MAJSFT; + minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> + TG3_NVM_HWSB_CFG1_MINSFT; + + snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); +} + +static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) { - u32 val, offset, start; 
- u32 ver_offset; - int i, bcnt; + u32 offset, major, minor, build; + + tp->fw_ver[0] = 's'; + tp->fw_ver[1] = 'b'; + tp->fw_ver[2] = '\0'; - if (tg3_nvram_read_swab(tp, 0, &val)) + if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) return; - if (val != TG3_EEPROM_MAGIC) + switch (val & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + offset = TG3_EEPROM_SB_F1R0_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_2: + offset = TG3_EEPROM_SB_F1R2_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_3: + offset = TG3_EEPROM_SB_F1R3_EDH_OFF; + break; + default: return; + } - if (tg3_nvram_read_swab(tp, 0xc, &offset) || - tg3_nvram_read_swab(tp, 0x4, &start)) + if (tg3_nvram_read(tp, offset, &val)) return; - offset = tg3_nvram_logical_addr(tp, offset); + build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> + TG3_EEPROM_SB_EDH_BLD_SHFT; + major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> + TG3_EEPROM_SB_EDH_MAJ_SHFT; + minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; - if (!tg3_fw_img_is_valid(tp, offset) || - tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) + if (minor > 99 || build > 26) return; - offset = offset + ver_offset - start; - for (i = 0; i < 16; i += 4) { - __le32 v; - if (tg3_nvram_read_le(tp, offset + i, &v)) - return; + snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor); - memcpy(tp->fw_ver + i, &v, 4); + if (build > 0) { + tp->fw_ver[8] = 'a' + build - 1; + tp->fw_ver[9] = '\0'; } +} - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) - return; +static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) +{ + u32 val, offset, start; + int i, vlen; for (offset = TG3_NVM_DIR_START; offset < TG3_NVM_DIR_END; offset += TG3_NVM_DIRENT_SIZE) { - if (tg3_nvram_read_swab(tp, offset, &val)) + if (tg3_nvram_read(tp, offset, &val)) return; if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) @@ -11275,37 +12680,96 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) start = 0x08000000; - else if (tg3_nvram_read_swab(tp, offset - 4, &start)) + else if (tg3_nvram_read(tp, offset - 4, &start)) return; - if (tg3_nvram_read_swab(tp, offset + 4, &offset) || + if (tg3_nvram_read(tp, offset + 4, &offset) || !tg3_fw_img_is_valid(tp, offset) || - tg3_nvram_read_swab(tp, offset + 8, &val)) + tg3_nvram_read(tp, offset + 8, &val)) return; offset += val - start; - bcnt = strlen(tp->fw_ver); + vlen = strlen(tp->fw_ver); - tp->fw_ver[bcnt++] = ','; - tp->fw_ver[bcnt++] = ' '; + tp->fw_ver[vlen++] = ','; + tp->fw_ver[vlen++] = ' '; for (i = 0; i < 4; i++) { - __le32 v; - if (tg3_nvram_read_le(tp, offset, &v)) + __be32 v; + if (tg3_nvram_read_be32(tp, offset, &v)) return; offset += sizeof(v); - if (bcnt > TG3_VER_SIZE - sizeof(v)) { - memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt); + if (vlen > TG3_VER_SIZE - sizeof(v)) { + memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); break; } - memcpy(&tp->fw_ver[bcnt], &v, sizeof(v)); - bcnt += sizeof(v); + memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } +} + +static void __devinit tg3_read_dash_ver(struct tg3 *tp) +{ + int vlen; + u32 apedata; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + + vlen = strlen(tp->fw_ver); + + 
snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d", + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); +} + +static void __devinit tg3_read_fw_ver(struct tg3 *tp) +{ + u32 val; + + if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { + tp->fw_ver[0] = 's'; + tp->fw_ver[1] = 'b'; + tp->fw_ver[2] = '\0'; + + return; } + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val == TG3_EEPROM_MAGIC) + tg3_read_bc_ver(tp); + else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) + tg3_read_sb_ver(tp, val); + else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + tg3_read_hwsb_ver(tp); + else + return; + + if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || + (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return; + + tg3_read_mgmtfw_ver(tp); + tp->fw_ver[TG3_VER_SIZE - 1] = 0; } @@ -11323,11 +12787,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) { }, }; u32 misc_ctrl_reg; - u32 cacheline_sz_reg; u32 pci_state_reg, grc_misc_cfg; u32 val; u16 pci_cmd; - int err, pcie_cap; + int err; /* Force memory write invalidate off. If we leave it on, * then on 5700_BX chips we have to enable a workaround. @@ -11354,9 +12817,26 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { u32 prod_id_asic_rev; - pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, - &prod_id_asic_rev); - tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK; + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN2_PRODID_ASICREV, + &prod_id_asic_rev); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN15_PRODID_ASICREV, + &prod_id_asic_rev); + else + pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, + &prod_id_asic_rev); + + tp->pci_chip_rev_id = prod_id_asic_rev; } /* Wrong chip ID in 5752 A0. 
  /* Wrong chip ID in 5752 A0. This code can be removed later
@@ -11493,25 +12973,26 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  tp->misc_host_ctrl);
- pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
- &cacheline_sz_reg);
-
- tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
- tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
- tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
- tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
-
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
  tp->pdev_peer = tg3_find_peer(tp);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ /* Intentionally exclude ASIC_REV_5906 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
  (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
  tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
@@ -11519,6 +13000,42 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
  tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
+ tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
+ else {
+ tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+ tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ tp->dev->features |= NETIF_F_IPV6_CSUM;
+ }
+
+ /* Determine TSO capabilities */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
+ else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
+ else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
+ tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ tp->fw_needed = FIRMWARE_TG3TSO5;
+ else
+ tp->fw_needed = FIRMWARE_TG3TSO;
+ }
+
+ tp->irq_max = 1;
+
  if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
  tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
  if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -11528,47 +13045,71 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  tp->pdev_peer == tp->pdev))
  tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
  tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
- } else {
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5750 &&
- tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
- tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
+ tp->irq_max = TG3_IRQ_MAX_VECS;
  }
  }
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
- tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
+ else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
+ tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
+ tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
+ tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+
+ tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
+ if (tp->pcie_cap != 0) {
+ u16 lnkctl;
- pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap != 0) {
  tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
  pcie_set_readrq(tp->pdev, 4096);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u16 lnkctl;
-
- pci_read_config_word(tp->pdev,
- pcie_cap + PCI_EXP_LNKCTL,
- &lnkctl);
- if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
  }
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+ } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ printk(KERN_ERR PFX "Cannot find PCI-X "
+ "capability, aborting.\n");
+ return -EIO;
+ }
+
+ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+ tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
  }
  /* If we have an AMD 762 or VIA K8T800 chipset, write
@@ -11581,42 +13122,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
  tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ &tp->pci_cacheline_sz);
+ pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ &tp->pci_lat_timer);
  if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  tp->pci_lat_timer < 64) {
  tp->pci_lat_timer = 64;
-
- cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
- cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
- cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
- cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
-
- pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
- cacheline_sz_reg);
- }
-
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
- tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
- if (!tp->pcix_cap) {
- printk(KERN_ERR PFX "Cannot find PCI-X "
- "capability, aborting.\n");
- return -EIO;
- }
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
  }
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
- &pci_state_reg);
-
- if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
- tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ /* 5700 BX chips need to have their TX producer index
+ * mailboxes written twice to workaround a bug.
+ */
+ tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
- /* If this is a 5700 BX chipset, and we are in PCI-X
- * mode, enable register write workaround.
+ /* If we are in PCI-X mode, enable register write workaround.
  *
  * The workaround is to use indirect register accesses
  * for all chip writes not to mailbox registers.
  */
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
  u32 pm_reg;
  tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
@@ -11641,12 +13169,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  }
  }
- /* 5700 BX chips need to have their TX producer index mailboxes
- * written twice to workaround a bug.
- */
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
- tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
-
  if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
  tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
  if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
@@ -11683,7 +13205,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  tp->write32 = tg3_write_flush_reg32;
  }
-
  if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
  (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
  tp->write32_tx_mbox = tg3_write32_tx_mbox;
@@ -11740,16 +13261,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  }
  if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
  tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
- tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
- }
-
  /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
  * GPIO1 driven high will bring 5700's external PHY out of reset.
  * It is also used as eeprom write protect on LOMs.
@@ -11765,8 +13283,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+ /* Turn off the debug UART. */
  tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+ if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+ /* Keep VMain power. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OUTPUT0;
+ }
  /* Force the chip into D0. */
  err = tg3_set_power_state(tp, PCI_D0);
@@ -11776,12 +13305,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  return err;
  }
- /* 5700 B0 chips do not support checksumming correctly due
- * to hardware bugs.
- */
- if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
- tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
-
  /* Derive initial jumbo mode from MTU assigned in
  * ether_setup() via the alloc_etherdev() call
@@ -11799,12 +13322,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
  }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
+
  /* A few boards don't want Ethernet@WireSpeed phy feature */
  if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
  ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
  (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
  (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
+ (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
  (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
  tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
@@ -11814,7 +13340,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
  tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
  if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -11824,7 +13355,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
  if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
  tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+ } else
  tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
  }
@@ -11835,8 +13366,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  tp->phy_otp = TG3_OTP_DEFAULT;
  }
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
  tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
  else
  tp->mi_mode = MAC_MI_MODE_BASE;
@@ -11846,9 +13376,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
  tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
- /* Initialize MAC MI mode, polling disabled. */
- tw32_f(MAC_MI_MODE, tp->mi_mode);
- udelay(80);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
+
+ err = tg3_mdio_init(tp);
+ if (err)
+ return err;
  /* Initialize data/descriptor byte/word swapping. */
  val = tr32(GRC_MODE);
@@ -11909,6 +13443,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  tp->misc_host_ctrl);
  }
+ /* Preserve the APE MAC_MODE bits */
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ tp->mac_mode = tr32(MAC_MODE) |
+ MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = TG3_DEF_MAC_MODE;
+
  /* these are limited to 10/100 only */
  if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -11921,7 +13462,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
  tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
  tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
+ (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
  tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
  err = tg3_phy_probe(tp);
@@ -11929,6 +13471,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
  pci_name(tp->pdev), err);
  /* ... but do not return immediately ... */
+ tg3_mdio_fini(tp);
  }
  tg3_read_partno(tp);
@@ -11969,19 +13512,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
  else
  tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- /* All chips before 5787 can get confused if TX buffers
- * straddle the 4GB address boundary in some cases.
- */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tp->dev->hard_start_xmit = tg3_start_xmit;
- else
- tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
-
- tp->rx_offset = 2;
+ tp->rx_offset = NET_IP_ALIGN;
  if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
  (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
  tp->rx_offset = 0;
@@ -12051,8 +13582,10 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
  tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
  else
  tg3_nvram_unlock(tp);
- }
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
+ mac_offset = 0xcc;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  mac_offset = 0x10;
  /* First try to get it from MAC address mailbox. */
@@ -12072,14 +13605,11 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
  }
  if (!addr_ok) {
  /* Next, try NVRAM. */
- if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
- !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
- dev->dev_addr[0] = ((hi >> 16) & 0xff);
- dev->dev_addr[1] = ((hi >> 24) & 0xff);
- dev->dev_addr[2] = ((lo >> 0) & 0xff);
- dev->dev_addr[3] = ((lo >> 8) & 0xff);
- dev->dev_addr[4] = ((lo >> 16) & 0xff);
- dev->dev_addr[5] = ((lo >> 24) & 0xff);
+ if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
+ memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
  }
  /* Finally just fetch it out of the MAC control regs. */
  else {
@@ -12139,6 +13669,12 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 #endif
 #endif
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ goto out;
+ }
+
  if (!goal)
  goto out;
@@ -12178,7 +13714,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
  val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
  DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
  break;
- };
+ }
  } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
  switch (cacheline_size) {
  case 16:
@@ -12195,7 +13731,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
  val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
  val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
  break;
- };
+ }
  } else {
  switch (cacheline_size) {
  case 16:
@@ -12239,7 +13775,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
  val |= (DMA_RWCTRL_READ_BNDRY_1024 |
  DMA_RWCTRL_WRITE_BNDRY_1024);
  break;
- };
+ }
  }
 out:
@@ -12333,7 +13869,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 {
  dma_addr_t buf_dma;
  u32 *buf, saved_dma_rwctrl;
- int ret;
+ int ret = 0;
  buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
  if (!buf) {
@@ -12346,6 +13882,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
  tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ goto out;
+
  if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
  /* DMA read watermark not used on PCIE */
  tp->dma_rwctrl |= 0x00180000;
@@ -12418,7 +13958,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
  tg3_switch_clocks(tp);
 #endif
- ret = 0;
  if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
  goto out;
@@ -12536,7 +14075,9 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
  tp->bufmgr_config.mbuf_read_dma_low_water =
  DEFAULT_MB_RDMA_LOW_WATER_5705;
  tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -12596,10 +14137,11 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
  case PHY_ID_BCM5756: return "5722/5756";
  case PHY_ID_BCM5906: return "5906";
  case PHY_ID_BCM5761: return "5761";
+ case PHY_ID_BCM5717: return "5717";
  case PHY_ID_BCM8002: return "8002/serdes";
  case 0: return "serdes";
  default: return "unknown";
- };
+ }
 }
 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
@@ -12697,18 +14239,54 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
  }
 }
+static const struct net_device_ops tg3_netdev_ops = {
+ .ndo_open = tg3_open,
+ .ndo_stop = tg3_close,
+ .ndo_start_xmit = tg3_start_xmit,
+ .ndo_get_stats = tg3_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = tg3_set_rx_mode,
+ .ndo_set_mac_address = tg3_set_mac_addr,
+ .ndo_do_ioctl = tg3_ioctl,
+ .ndo_tx_timeout = tg3_tx_timeout,
+ .ndo_change_mtu = tg3_change_mtu,
+#if TG3_VLAN_TAG_USED
+ .ndo_vlan_rx_register = tg3_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tg3_poll_controller,
+#endif
+};
+
+static const struct net_device_ops tg3_netdev_ops_dma_bug = {
+ .ndo_open = tg3_open,
+ .ndo_stop = tg3_close,
+ .ndo_start_xmit = tg3_start_xmit_dma_bug,
+ .ndo_get_stats = tg3_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = tg3_set_rx_mode,
+ .ndo_set_mac_address = tg3_set_mac_addr,
+ .ndo_do_ioctl = tg3_ioctl,
+ .ndo_tx_timeout = tg3_tx_timeout,
+ .ndo_change_mtu = tg3_change_mtu,
+#if TG3_VLAN_TAG_USED
+ .ndo_vlan_rx_register = tg3_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tg3_poll_controller,
+#endif
+};
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
  const struct pci_device_id *ent)
 {
  static int tg3_version_printed = 0;
- resource_size_t tg3reg_base;
- unsigned long tg3reg_len;
  struct net_device *dev;
  struct tg3 *tp;
- int err, pm_cap;
+ int i, err, pm_cap;
+ u32 sndmbx, rcvmbx, intmbx;
  char str[40];
  u64 dma_mask, persist_dma_mask;
- DECLARE_MAC_BUF(mac);
  if (tg3_version_printed++ == 0)
  printk(KERN_INFO "%s", version);
@@ -12720,13 +14298,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  return err;
  }
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
- "base address, aborting.\n");
- err = -ENODEV;
- goto err_out_disable_pdev;
- }
-
  err = pci_request_regions(pdev, DRV_MODULE_NAME);
  if (err) {
  printk(KERN_ERR PFX "Cannot obtain PCI resources, "
@@ -12745,10 +14316,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  goto err_out_free_res;
  }
- tg3reg_base = pci_resource_start(pdev, 0);
- tg3reg_len = pci_resource_len(pdev, 0);
-
- dev = alloc_etherdev(sizeof(*tp));
+ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
  if (!dev) {
  printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
  err = -ENOMEM;
@@ -12759,14 +14327,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 #if TG3_VLAN_TAG_USED
  dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_rx_register = tg3_vlan_rx_register;
 #endif
  tp = netdev_priv(dev);
  tp->pdev = pdev;
  tp->dev = dev;
  tp->pm_cap = pm_cap;
- tp->mac_mode = TG3_DEF_MAC_MODE;
  tp->rx_mode = TG3_DEF_RX_MODE;
  tp->tx_mode = TG3_DEF_TX_MODE;
@@ -12800,7 +14366,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  spin_lock_init(&tp->indirect_lock);
  INIT_WORK(&tp->reset_task, tg3_reset_task);
- tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
+ tp->regs = pci_ioremap_bar(pdev, BAR_0);
  if (!tp->regs) {
  printk(KERN_ERR PFX "Cannot map device registers, "
  "aborting.\n");
@@ -12812,23 +14378,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  tp->rx_pending = TG3_DEF_RX_RING_PENDING;
  tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
- tp->tx_pending = TG3_DEF_TX_RING_PENDING;
-
- dev->open = tg3_open;
- dev->stop = tg3_close;
- dev->get_stats = tg3_get_stats;
- dev->set_multicast_list = tg3_set_rx_mode;
- dev->set_mac_address = tg3_set_mac_addr;
- dev->do_ioctl = tg3_ioctl;
- dev->tx_timeout = tg3_tx_timeout;
- netif_napi_add(dev, &tp->napi, tg3_poll, 64);
+ dev->ethtool_ops = &tg3_ethtool_ops;
  dev->watchdog_timeo = TG3_TX_TIMEOUT;
- dev->change_mtu = tg3_change_mtu;
  dev->irq = pdev->irq;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = tg3_poll_controller;
-#endif
  err = tg3_get_invariants(tp);
  if (err) {
@@ -12837,6 +14390,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  goto err_out_iounmap;
  }
+ if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ dev->netdev_ops = &tg3_netdev_ops;
+ else
+ dev->netdev_ops = &tg3_netdev_ops_dma_bug;
+
+
  /* The EPB bridge inside 5714, 5715, and 5780 and any
  * device behind the EPB cannot support DMA addresses > 40-bit.
  * On 64-bit systems with IOMMU, use 40-bit dma_mask.
  * On 64-bit systems without IOMMU, use 64-bit dma_mask and
  * do DMA address check in tg3_start_xmit().
  */
  if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
- persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
  else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
- persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
 #ifdef CONFIG_HIGHMEM
- dma_mask = DMA_64BIT_MASK;
+ dma_mask = DMA_BIT_MASK(64);
 #endif
  } else
- persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
  /* Configure DMA attributes. */
- if (dma_mask > DMA_32BIT_MASK) {
+ if (dma_mask > DMA_BIT_MASK(32)) {
  err = pci_set_dma_mask(pdev, dma_mask);
  if (!err) {
  dev->features |= NETIF_F_HIGHDMA;
@@ -12867,8 +14427,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  }
  }
  }
- if (err || dma_mask == DMA_32BIT_MASK) {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err || dma_mask == DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  if (err) {
  printk(KERN_ERR PFX "No usable DMA configuration, "
  "aborting.\n");
@@ -12878,33 +14438,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  tg3_init_bufmgr_config(tp);
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+ /* Selectively allow TSO based on operating conditions */
+ if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+ (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
  tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+ else {
+ tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
+ tp->fw_needed = NULL;
  }
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
- tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
- } else {
- tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
- }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ tp->fw_needed = FIRMWARE_TG3;
  /* TSO is on by default on chips that support hardware TSO.
  * Firmware TSO on older chips gives lower performance, so it
  * is off by default, but can be enabled using ethtool.
  */
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+ if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
+ (dev->features & NETIF_F_IP_CSUM))
  dev->features |= NETIF_F_TSO;
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
+
+ if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
+ (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
+ if (dev->features & NETIF_F_IPV6_CSUM)
  dev->features |= NETIF_F_TSO6;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  dev->features |= NETIF_F_TSO_ECN;
  }
-
  if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
  !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
  !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -12920,17 +14486,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  }
  if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
- "base address for APE, aborting.\n");
- err = -ENODEV;
- goto err_out_iounmap;
- }
-
- tg3reg_base = pci_resource_start(pdev, 2);
- tg3reg_len = pci_resource_len(pdev, 2);
-
- tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
+ tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
  if (!tp->aperegs) {
  printk(KERN_ERR PFX "Cannot map APE registers, "
  "aborting.\n");
@@ -12939,6 +14495,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  }
  tg3_ape_lock_init(tp);
+
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
+ tg3_read_dash_ver(tp);
  }
  /*
@@ -12958,24 +14517,56 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  goto err_out_apeunmap;
  }
- /* Tigon3 can do ipv4 only... and some chips have buggy
- * checksumming.
- */
- if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
- dev->features |= NETIF_F_IPV6_CSUM;
-
- tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
- } else
- tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
-
  /* flow control autonegotiation is default behavior */
  tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
- tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
+ tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
+ intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+ rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+ sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+ for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->tp = tp;
+ tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+ tnapi->int_mbox = intmbx;
+ if (i < 4)
+ intmbx += 0x8;
+ else
+ intmbx += 0x4;
+
+ tnapi->consmbox = rcvmbx;
+ tnapi->prodmbox = sndmbx;
+
+ if (i) {
+ tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+ netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
+ } else {
+ tnapi->coal_now = HOSTCC_MODE_NOW;
+ netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
+ }
+
+ if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
+ break;
+
+ /*
+ * If we support MSIX, we'll be using RSS. If we're using
+ * RSS, the first vector only handles link interrupts and the
+ * remaining vectors handle rx and tx interrupts. Reuse the
+ * mailbox values for the next iteration. The values we setup
+ * above are still useful for the single vectored mode.
+ */
+ if (!i)
+ continue;
+
+ rcvmbx += 0x8;
+
+ if (sndmbx & 0x4)
+ sndmbx -= 0x4;
+ else
+ sndmbx += 0xc;
+ }
  tg3_init_coal(tp);
@@ -12988,31 +14579,40 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
  goto err_out_apeunmap;
  }
- printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
- "(%s) %s Ethernet %s\n",
+ printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
  dev->name,
  tp->board_part_number,
  tp->pci_chip_rev_id,
- tg3_phy_string(tp),
  tg3_bus_string(tp, str),
- ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
- ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
- "10/100/1000Base-T")),
- print_mac(mac, dev->dev_addr));
-
- printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
- "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
+ dev->dev_addr);
+
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+ struct phy_device *phydev;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ printk(KERN_INFO
+ "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ tp->dev->name, phydev->drv->name,
+ dev_name(&phydev->dev));
+ } else
+ printk(KERN_INFO
+ "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
+ tp->dev->name, tg3_phy_string(tp),
+ ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+ ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+ "10/100/1000Base-T")),
+ (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
+
+ printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
  dev->name,
  (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
  (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
  (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
  (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
- (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
  (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
  printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
  dev->name, tp->dma_rwctrl,
- (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
- (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
+ (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
+ (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
  return 0;
@@ -13047,7 +14647,16 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
  if (dev) {
  struct tg3 *tp = netdev_priv(dev);
+ if (tp->fw)
+ release_firmware(tp->fw);
+
  flush_scheduled_work();
+
+ if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ tg3_phy_fini(tp);
+ tg3_mdio_fini(tp);
+ }
+
  unregister_netdev(dev);
  if (tp->aperegs) {
  iounmap(tp->aperegs);
@@ -13068,6 +14677,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 {
  struct net_device *dev = pci_get_drvdata(pdev);
  struct tg3 *tp = netdev_priv(dev);
+ pci_power_t target_state;
  int err;
  /* PCI register 4 needs to be saved whether netif_running() or not.
@@ -13080,6 +14690,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
  return 0;
  flush_scheduled_work();
+ tg3_phy_stop(tp);
  tg3_netif_stop(tp);
  del_timer_sync(&tp->timer);
@@ -13095,12 +14706,17 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
  tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
  tg3_full_unlock(tp);
- err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
+ target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
+
+ err = tg3_set_power_state(tp, target_state);
  if (err) {
+ int err2;
+
  tg3_full_lock(tp, 0);
  tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- if (tg3_restart_hw(tp, 1))
+ err2 = tg3_restart_hw(tp, 1);
+ if (err2)
  goto out;
  tp->timer.expires = jiffies + tp->timer_offset;
@@ -13111,6 +14727,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 out:
  tg3_full_unlock(tp);
+
+ if (!err2)
+ tg3_phy_start(tp);
  }
  return err;
@@ -13148,6 +14767,9 @@ static int tg3_resume(struct pci_dev *pdev)
 out:
  tg3_full_unlock(tp);
+ if (!err)
+ tg3_phy_start(tp);
+
  return err;
 }