Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
[safe/jmp/linux-2.6] / drivers / net / via-velocity.c
index 47b2882..c93f58f 100644 (file)
@@ -9,7 +9,6 @@
  *
  * TODO
  *     rx_copybreak/alignment
- *     Scatter gather
  *     More testing
  *
  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
@@ -275,7 +274,7 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 
 #define DMA_LENGTH_MIN  0
 #define DMA_LENGTH_MAX  7
-#define DMA_LENGTH_DEF  0
+#define DMA_LENGTH_DEF  6
 
 /* DMA_length[] is used for controlling the DMA length
    0: 8 DWORDs
@@ -298,14 +297,6 @@ VELOCITY_PARAM(DMA_length, "DMA length");
 */
 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
 
-#define TX_CSUM_DEF     1
-/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
-   (We only support RX checksum offload now)
-   0: disable csum_offload[checksum offload
-   1: enable checksum offload. (Default)
-*/
-VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
-
 #define FLOW_CNTL_DEF   1
 #define FLOW_CNTL_MIN   1
 #define FLOW_CNTL_MAX   5
@@ -491,7 +482,6 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
 
-       velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
@@ -905,8 +895,8 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
 
        /*
           Check if new status is consisent with current status
-          if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
-          || (mii_status==curr_status)) {
+          if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
+              (mii_status==curr_status)) {
           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
           vptr->mii_status=check_connection_type(vptr->mac_regs);
           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
@@ -1142,8 +1132,8 @@ static void velocity_set_multi(struct net_device *dev)
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
-       } else if ((dev->mc_count > vptr->multicast_limit)
-                  || (dev->flags & IFF_ALLMULTI)) {
+       } else if ((dev->mc_count > vptr->multicast_limit) ||
+                  (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
@@ -1643,12 +1633,10 @@ out:
  */
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-       dma_addr_t curr;
        int j;
 
        /* Init the TD ring entries */
        for (j = 0; j < vptr->tx.numq; j++) {
-               curr = vptr->tx.pool_dma[j];
 
                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
                                            sizeof(struct velocity_td_info),
@@ -1714,21 +1702,27 @@ err_free_dma_rings_0:
  *     Release an transmit buffer. If the buffer was preallocated then
  *     recycle it, if not then unmap the buffer.
  */
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+static void velocity_free_tx_buf(struct velocity_info *vptr,
+               struct velocity_td_info *tdinfo, struct tx_desc *td)
 {
        struct sk_buff *skb = tdinfo->skb;
-       int i;
-       int pktlen;
 
        /*
         *      Don't unmap the pre-allocated tx_bufs
         */
        if (tdinfo->skb_dma) {
+               int i;
 
-               pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
                for (i = 0; i < tdinfo->nskb_dma; i++) {
-                       pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
-                       tdinfo->skb_dma[i] = 0;
+                       size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+
+                       /* For scatter-gather */
+                       if (skb_shinfo(skb)->nr_frags > 0)
+                               pktlen = max_t(size_t, pktlen,
+                                               le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
+
+                       pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+                                       pktlen, PCI_DMA_TODEVICE);
                }
        }
        dev_kfree_skb_irq(skb);
@@ -1930,7 +1924,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
                                stats->tx_packets++;
                                stats->tx_bytes += tdinfo->skb->len;
                        }
-                       velocity_free_tx_buf(vptr, tdinfo);
+                       velocity_free_tx_buf(vptr, tdinfo, td);
                        vptr->tx.used[qnum]--;
                }
                vptr->tx.tail[qnum] = idx;
@@ -1942,8 +1936,8 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
         *      Look to see if we should kick the transmit network
         *      layer for more work.
         */
-       if (netif_queue_stopped(vptr->dev) && (full == 0)
-           && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+       if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+           (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
                netif_wake_queue(vptr->dev);
        }
        return works;
@@ -2243,8 +2237,6 @@ static int velocity_open(struct net_device *dev)
        /* Ensure chip is running */
        pci_set_power_state(vptr->pdev, PCI_D0);
 
-       velocity_give_many_rx_descs(vptr);
-
        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
        ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
@@ -2256,6 +2248,8 @@ static int velocity_open(struct net_device *dev)
                goto out;
        }
 
+       velocity_give_many_rx_descs(vptr);
+
        mac_enable_int(vptr->mac_regs);
        netif_start_queue(dev);
        napi_enable(&vptr->napi);
@@ -2345,10 +2339,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
                dev->mtu = new_mtu;
 
-               velocity_give_many_rx_descs(vptr);
-
                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
+               velocity_give_many_rx_descs(vptr);
+
                mac_enable_int(vptr->mac_regs);
                netif_start_queue(dev);
 
@@ -2529,14 +2523,22 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
        struct velocity_td_info *tdinfo;
        unsigned long flags;
        int pktlen;
-       __le16 len;
-       int index;
+       int index, prev;
+       int i = 0;
 
        if (skb_padto(skb, ETH_ZLEN))
                goto out;
-       pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 
-       len = cpu_to_le16(pktlen);
+       /* The hardware can handle at most 7 memory segments, so merge
+        * the skb if there are more */
+       if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       pktlen = skb_shinfo(skb)->nr_frags == 0 ?
+                       max_t(unsigned int, skb->len, ETH_ZLEN) :
+                               skb_headlen(skb);
 
        spin_lock_irqsave(&vptr->lock, flags);
 
@@ -2553,11 +2555,24 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
         */
        tdinfo->skb = skb;
        tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-       td_ptr->tdesc0.len = len;
+       td_ptr->tdesc0.len = cpu_to_le16(pktlen);
        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
        td_ptr->td_buf[0].pa_high = 0;
-       td_ptr->td_buf[0].size = len;
-       tdinfo->nskb_dma = 1;
+       td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
+
+       /* Handle fragments */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
+                               frag->page_offset, frag->size,
+                               PCI_DMA_TODEVICE);
+
+               td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+               td_ptr->td_buf[i + 1].pa_high = 0;
+               td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+       }
+       tdinfo->nskb_dma = i + 1;
 
        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
@@ -2569,8 +2584,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
        /*
         *      Handle hardware checksum
         */
-       if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-                                && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+       if ((dev->features & NETIF_F_IP_CSUM) &&
+           (skb->ip_summed == CHECKSUM_PARTIAL)) {
                const struct iphdr *ip = ip_hdr(skb);
                if (ip->protocol == IPPROTO_TCP)
                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
@@ -2578,23 +2593,21 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
                td_ptr->tdesc1.TCR |= TCR0_IPCK;
        }
-       {
 
-               int prev = index - 1;
+       prev = index - 1;
+       if (prev < 0)
+               prev = vptr->options.numtx - 1;
+       td_ptr->tdesc0.len |= OWNED_BY_NIC;
+       vptr->tx.used[qnum]++;
+       vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
-               if (prev < 0)
-                       prev = vptr->options.numtx - 1;
-               td_ptr->tdesc0.len |= OWNED_BY_NIC;
-               vptr->tx.used[qnum]++;
-               vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+       if (AVAIL_TD(vptr, qnum) < 1)
+               netif_stop_queue(dev);
 
-               if (AVAIL_TD(vptr, qnum) < 1)
-                       netif_stop_queue(dev);
+       td_ptr = &(vptr->tx.rings[qnum][prev]);
+       td_ptr->td_buf[0].size |= TD_QUEUE;
+       mac_tx_queue_wake(vptr->mac_regs, qnum);
 
-               td_ptr = &(vptr->tx.rings[qnum][prev]);
-               td_ptr->td_buf[0].size |= TD_QUEUE;
-               mac_tx_queue_wake(vptr->mac_regs, qnum);
-       }
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&vptr->lock, flags);
 out:
@@ -2816,10 +2829,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-               NETIF_F_HW_VLAN_RX;
-
-       if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-               dev->features |= NETIF_F_IP_CSUM;
+               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
        ret = register_netdev(dev);
        if (ret < 0)
@@ -3370,10 +3380,13 @@ static const struct ethtool_ops velocity_ethtool_ops = {
        .get_settings   =       velocity_get_settings,
        .set_settings   =       velocity_set_settings,
        .get_drvinfo    =       velocity_get_drvinfo,
+       .set_tx_csum    =       ethtool_op_set_tx_csum,
+       .get_tx_csum    =       ethtool_op_get_tx_csum,
        .get_wol        =       velocity_ethtool_get_wol,
        .set_wol        =       velocity_ethtool_set_wol,
        .get_msglevel   =       velocity_get_msglevel,
        .set_msglevel   =       velocity_set_msglevel,
+       .set_sg         =       ethtool_op_set_sg,
        .get_link       =       velocity_get_link,
        .get_coalesce   =       velocity_get_coalesce,
        .set_coalesce   =       velocity_set_coalesce,