tg3: Update version to 3.110
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 460a0c2..573054a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
 #include "tg3.h"
 
 #define DRV_MODULE_NAME                "tg3"
-#define DRV_MODULE_VERSION     "3.109"
-#define DRV_MODULE_RELDATE     "April 2, 2010"
+#define DRV_MODULE_VERSION     "3.110"
+#define DRV_MODULE_RELDATE     "April 9, 2010"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
                                 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)             (((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
+#define TG3_RX_DMA_ALIGN               16
+#define TG3_RX_HEADROOM                        ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
+
 #define TG3_DMA_BYTE_ENAB              64
 
 #define TG3_RX_STD_DMA_SZ              1536
 
 #define TG3_RSS_MIN_NUM_MSIX_VECS      2
 
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode.  The driver
+ * works around this bug by double copying the packet.  This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient.  For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path.  Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD          256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+       #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
+#else
+       #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
+#endif
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
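Concretely, TG3_RX_COPY_THRESH() keeps the rx hot path free of a device-structure load whenever the architecture handles unaligned accesses cheaply. A minimal sketch of the two expansions (illustrative only; tp->rx_copy_thresh is the field initialized in the tg3_get_invariants() hunk at the end of this diff):

        /* How the rx-path comparison resolves:
         *
         *   if (len > TG3_RX_COPY_THRESH(tp))   -- take the no-copy path
         *
         * NET_IP_ALIGN == 0 or CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:
         *   if (len > 256)                      -- compile-time constant,
         *                                          no load from *tp
         * otherwise:
         *   if (len > tp->rx_copy_thresh)       -- runtime value, raised to
         *                                          ~(u16)0 for a 5701 in
         *                                          PCI-X mode
         */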
 
@@ -362,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
 
 static u32 tg3_read32(struct tg3 *tp, u32 off)
 {
-       return (readl(tp->regs + off));
+       return readl(tp->regs + off);
 }
 
 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -372,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
 
 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
 {
-       return (readl(tp->aperegs + off));
+       return readl(tp->aperegs + off);
 }
 
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -490,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 
 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
 {
-       return (readl(tp->regs + off + GRCMBOX_BASE));
+       return readl(tp->regs + off + GRCMBOX_BASE);
 }
 
 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -4379,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
                }
 
                pci_unmap_single(tp->pdev,
-                                pci_unmap_addr(ri, mapping),
+                                dma_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
 
@@ -4393,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
                                tx_bug = 1;
 
                        pci_unmap_page(tp->pdev,
-                                      pci_unmap_addr(ri, mapping),
+                                      dma_unmap_addr(ri, mapping),
                                       skb_shinfo(skb)->frags[i].size,
                                       PCI_DMA_TODEVICE);
                        sw_idx = NEXT_TX(sw_idx);
@@ -4431,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
        if (!ri->skb)
                return;
 
-       pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+       pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
                         map_sz, PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(ri->skb);
        ri->skb = NULL;
@@ -4497,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
        }
 
        map->skb = skb;
-       pci_unmap_addr_set(map, mapping, mapping);
+       dma_unmap_addr_set(map, mapping, mapping);
 
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4542,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
        }
 
        dest_map->skb = src_map->skb;
-       pci_unmap_addr_set(dest_map, mapping,
-                          pci_unmap_addr(src_map, mapping));
+       dma_unmap_addr_set(dest_map, mapping,
+                          dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;
 
@@ -4606,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;
+               bool hw_vlan __maybe_unused = false;
+               u16 vtag __maybe_unused = 0;
 
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        ri = &tp->prodring[0].rx_std_buffers[desc_idx];
-                       dma_addr = pci_unmap_addr(ri, mapping);
+                       dma_addr = dma_unmap_addr(ri, mapping);
                        skb = ri->skb;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
-                       dma_addr = pci_unmap_addr(ri, mapping);
+                       dma_addr = dma_unmap_addr(ri, mapping);
                        skb = ri->skb;
                        post_ptr = &jmb_prod_idx;
                } else
@@ -4639,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;
 
-               if (len > RX_COPY_THRESHOLD &&
-                   tp->rx_offset == NET_IP_ALIGN) {
-                   /* rx_offset will likely not equal NET_IP_ALIGN
-                    * if this is a 5701 card running in PCI-X mode
-                    * [see tg3_get_invariants()]
-                    */
+               if (len > TG3_RX_COPY_THRESH(tp)) {
                        int skb_size;
 
                        skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4669,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
 
-                       copy_skb = netdev_alloc_skb(tp->dev,
-                                                   len + TG3_RAW_IP_ALIGN);
+                       copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+                                                   TG3_RAW_IP_ALIGN);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;
 
-                       skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+                       skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4700,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        goto next_pkt;
                }
 
+               if (desc->type_flags & RXD_FLAG_VLAN &&
+                   !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+                       vtag = desc->err_vlan & RXD_VLAN_MASK;
 #if TG3_VLAN_TAG_USED
-               if (tp->vlgrp != NULL &&
-                   desc->type_flags & RXD_FLAG_VLAN) {
-                       vlan_gro_receive(&tnapi->napi, tp->vlgrp,
-                                        desc->err_vlan & RXD_VLAN_MASK, skb);
-               } else
+                       if (tp->vlgrp)
+                               hw_vlan = true;
+                       else
+#endif
+                       {
+                               struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+                                                   __skb_push(skb, VLAN_HLEN);
+
+                               memmove(ve, skb->data + VLAN_HLEN,
+                                       ETH_ALEN * 2);
+                               ve->h_vlan_proto = htons(ETH_P_8021Q);
+                               ve->h_vlan_TCI = htons(vtag);
+                       }
+               }
+
+#if TG3_VLAN_TAG_USED
+               if (hw_vlan)
+                       vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
+               else
 #endif
                        napi_gro_receive(&tnapi->napi, skb);
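When the chip has stripped a VLAN tag but no hardware-accelerated VLAN group is registered, the tag is rebuilt in software in front of the frame, which is why the copy path above now reserves an extra VLAN_HLEN of headroom. A standalone sketch of the same technique, assuming a linear skb whose Ethernet header sits at skb->data with at least VLAN_HLEN of headroom:

        #include <linux/if_ether.h>
        #include <linux/if_vlan.h>
        #include <linux/skbuff.h>
        #include <linux/string.h>

        /* Re-insert an 802.1Q header that hardware stripped; vtag is
         * the 16-bit TCI.  Mirrors the memmove in the hunk above.
         */
        static void vlan_tag_reinsert(struct sk_buff *skb, u16 vtag)
        {
                struct vlan_ethhdr *ve;

                ve = (struct vlan_ethhdr *)__skb_push(skb, VLAN_HLEN);
                /* Slide dst + src MAC addresses into the new headroom;
                 * the original EtherType stays put and becomes the
                 * encapsulated protocol field.
                 */
                memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2);
                ve->h_vlan_proto = htons(ETH_P_8021Q);
                ve->h_vlan_TCI = htons(vtag);
        }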
 
@@ -5439,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                        len = skb_shinfo(skb)->frags[i-1].size;
 
                pci_unmap_single(tp->pdev,
-                                pci_unmap_addr(&tnapi->tx_buffers[entry],
+                                dma_unmap_addr(&tnapi->tx_buffers[entry],
                                                mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        tnapi->tx_buffers[entry].skb = new_skb;
-                       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+                       dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           new_addr);
                } else {
                        tnapi->tx_buffers[entry].skb = NULL;
@@ -5574,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
        }
 
        tnapi->tx_buffers[entry].skb = skb;
-       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+       dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
        if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
            !mss && skb->len > ETH_DATA_LEN)
@@ -5600,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
                                goto dma_error;
 
                        tnapi->tx_buffers[entry].skb = NULL;
-                       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+                       dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
 
                        tg3_set_txd(tnapi, entry, mapping, len,
@@ -5630,7 +5665,7 @@ dma_error:
        entry = tnapi->tx_prod;
        tnapi->tx_buffers[entry].skb = NULL;
        pci_unmap_single(tp->pdev,
-                        pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+                        dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);
        for (i = 0; i <= last; i++) {
@@ -5638,7 +5673,7 @@ dma_error:
                entry = NEXT_TX(entry);
 
                pci_unmap_page(tp->pdev,
-                              pci_unmap_addr(&tnapi->tx_buffers[entry],
+                              dma_unmap_addr(&tnapi->tx_buffers[entry],
                                              mapping),
                               frag->size, PCI_DMA_TODEVICE);
        }
@@ -5740,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
-                       return (tg3_tso_bug(tp, skb));
+                       return tg3_tso_bug(tp, skb);
 
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
@@ -5800,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
        }
 
        tnapi->tx_buffers[entry].skb = skb;
-       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+       dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
        would_hit_hwbug = 0;
 
@@ -5836,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                                               len, PCI_DMA_TODEVICE);
 
                        tnapi->tx_buffers[entry].skb = NULL;
-                       pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+                       dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (pci_dma_mapping_error(tp->pdev, mapping))
                                goto dma_error;
@@ -5901,7 +5936,7 @@ dma_error:
        entry = tnapi->tx_prod;
        tnapi->tx_buffers[entry].skb = NULL;
        pci_unmap_single(tp->pdev,
-                        pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+                        dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);
        for (i = 0; i <= last; i++) {
@@ -5909,7 +5944,7 @@ dma_error:
                entry = NEXT_TX(entry);
 
                pci_unmap_page(tp->pdev,
-                              pci_unmap_addr(&tnapi->tx_buffers[entry],
+                              dma_unmap_addr(&tnapi->tx_buffers[entry],
                                              mapping),
                               frag->size, PCI_DMA_TODEVICE);
        }
@@ -6194,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
                        }
 
                        pci_unmap_single(tp->pdev,
-                                        pci_unmap_addr(txp, mapping),
+                                        dma_unmap_addr(txp, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);
                        txp->skb = NULL;
@@ -6204,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
                        for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
                                txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                                pci_unmap_page(tp->pdev,
-                                              pci_unmap_addr(txp, mapping),
+                                              dma_unmap_addr(txp, mapping),
                                               skb_shinfo(skb)->frags[k].size,
                                               PCI_DMA_TODEVICE);
                                i++;
@@ -7642,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(GRC_MODE, grc_mode);
        }
 
+       if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+               u32 grc_mode = tr32(GRC_MODE);
+
+               /* Access the lower 1K of PL PCIE block registers. */
+               val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+               tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+               val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
+               tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+                    val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+               tw32(GRC_MODE, grc_mode);
+
+               val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+               val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+               val |= CPMU_LSPD_10MB_MACCLK_6_25;
+               tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+       }
+
        /* This works around an issue with Athlon chipsets on
         * B3 tigon3 silicon.  This bit has no effect on any
         * other revision.  But do not set this on PCI Express
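The PL PCIE registers touched in the 57765_A0 block above sit behind a window: GRC_MODE_PCIE_PORT_MASK selects which PCIE sub-block appears at TG3_PCIE_TLDLPL_PORT, so the sequence is select, read-modify-write, restore. A sketch of that pattern factored into a helper (the helper name is hypothetical; tr32/tw32 are the driver's register accessor macros, which reference the local tp):

        /* Set bits in a windowed PL PCIE register, preserving the
         * caller's GRC_MODE port selection.
         */
        static void tg3_pcie_pl_set_bits(struct tg3 *tp, u32 off, u32 bits)
        {
                u32 grc_mode = tr32(GRC_MODE);
                u32 val;

                /* Map the lower 1K of the PL block into the window. */
                tw32(GRC_MODE, (grc_mode & ~GRC_MODE_PCIE_PORT_MASK) |
                               GRC_MODE_PCIE_PL_SEL);

                val = tr32(TG3_PCIE_TLDLPL_PORT + off);
                tw32(TG3_PCIE_TLDLPL_PORT + off, val | bits);

                /* Restore the previous window selection. */
                tw32(GRC_MODE, grc_mode);
        }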
@@ -7690,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                val = tr32(TG3PCI_DMA_RW_CTRL) &
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+               if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+                       val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7849,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                        val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
-                             (RX_STD_MAX_SIZE << 2);
+                             (TG3_RX_STD_DMA_SZ << 2);
                else
-                       val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
+                       val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
        } else
                val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
 
@@ -8644,6 +8700,7 @@ static int tg3_test_msi(struct tg3 *tp)
        pci_disable_msi(tp->pdev);
 
        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+       tp->napi[0].irq_vec = tp->pdev->irq;
 
        err = tg3_request_irq(tp, 0);
        if (err)
@@ -9229,10 +9286,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
-               tg3_set_multi (tp, 1);
+               tg3_set_multi(tp, 1);
        } else if (netdev_mc_empty(dev)) {
                /* Reject all multicast. */
-               tg3_set_multi (tp, 0);
+               tg3_set_multi(tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct netdev_hw_addr *ha;
@@ -9974,7 +10031,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
        return 0;
 }
 
-static int tg3_get_sset_count (struct net_device *dev, int sset)
+static int tg3_get_sset_count(struct net_device *dev, int sset)
 {
        switch (sset) {
        case ETH_SS_TEST:
@@ -9986,7 +10043,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
        }
 }
 
-static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
        case ETH_SS_STATS:
@@ -10033,7 +10090,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
        return 0;
 }
 
-static void tg3_get_ethtool_stats (struct net_device *dev,
+static void tg3_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
 {
        struct tg3 *tp = netdev_priv(dev);
@@ -10686,7 +10743,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
        rx_skb = tpr->rx_std_buffers[desc_idx].skb;
 
-       map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
+       map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
 
        for (i = 14; i < tx_len; i++) {
@@ -12937,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
                        tp->dev->features |= NETIF_F_IPV6_CSUM;
+               tp->dev->features |= NETIF_F_GRO;
        }
 
        /* Determine TSO capabilities */
@@ -13447,10 +13505,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        else
                tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 
-       tp->rx_offset = NET_IP_ALIGN;
+       tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+       tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
-           (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
-               tp->rx_offset = 0;
+           (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+               tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+               tp->rx_copy_thresh = ~(u16)0;
+#endif
+       }
 
        tp->rx_std_max_post = TG3_RX_RING_SIZE;
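
Worked through with NET_IP_ALIGN == 2 and the definitions at the top of this diff (TG3_RX_HEADROOM = ALIGN(VLAN_HLEN, 16) = 16), the new invariants come out as:

        /* Default:
         *   rx_offset      = NET_IP_ALIGN + TG3_RX_HEADROOM = 2 + 16 = 18
         *   rx_copy_thresh = TG3_RX_COPY_THRESHOLD = 256
         *
         * 5701 in PCI-X mode (DMA alignment bug):
         *   rx_offset      = 18 - NET_IP_ALIGN = 16, i.e. dword aligned
         *   rx_copy_thresh = ~(u16)0 = 0xffff where unaligned access is
         *                    costly, so len > TG3_RX_COPY_THRESH(tp)
         *                    never holds and every frame is double copied
         */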