mac80211: free up 2 bytes in skb->cb
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index abb2a34..85fa40a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -78,7 +78,7 @@
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define CP_VLAN_TAG_USED 1
 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
-       do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
+       do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
 #else
 #define CP_VLAN_TAG_USED 0
 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
@@ -127,7 +127,6 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered mu
          (CP)->tx_tail - (CP)->tx_head - 1)
 
 #define PKT_BUF_SZ             1536    /* Size of each temporary Rx buffer.*/
-#define RX_OFFSET              2
 #define CP_INTERNAL_PHY                32
 
 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
@@ -304,7 +303,7 @@ static const unsigned int cp_rx_config =
 
 struct cp_desc {
        __le32          opts1;
-       u32             opts2;
+       __le32          opts2;
        __le64          addr;
 };
 
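Annotating opts2 as __le32, matching opts1 and addr, lets sparse (make C=1) flag any access that mixes CPU-order values with the little-endian descriptor layout, which is the class of bug the CP_VLAN_TX_TAG change above fixes. A minimal sketch of the access discipline the annotation enforces; the example_* names are made up for illustration and are not part of the driver:

/* Sketch only: the __le32 access pattern that sparse checks.
 * All example_* identifiers are hypothetical. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_desc {
	__le32	opts1;
	__le32	opts2;		/* little-endian, as the NIC sees it */
	__le64	addr;
};

static inline void example_set_opts2(struct example_desc *desc, u32 val)
{
	desc->opts2 = cpu_to_le32(val);		/* convert on the way in */
}

static inline u32 example_get_opts2(const struct example_desc *desc)
{
	return le32_to_cpu(desc->opts2);	/* convert on the way out */
}
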
@@ -340,7 +339,6 @@ struct cp_private {
        u32                     rx_config;
        u16                     cpcmd;
 
-       struct net_device_stats net_stats;
        struct cp_extra_stats   cp_stats;
 
        unsigned                rx_head         ____cacheline_aligned;
@@ -457,14 +455,14 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 {
        skb->protocol = eth_type_trans (skb, cp->dev);
 
-       cp->net_stats.rx_packets++;
-       cp->net_stats.rx_bytes += skb->len;
+       cp->dev->stats.rx_packets++;
+       cp->dev->stats.rx_bytes += skb->len;
        cp->dev->last_rx = jiffies;
 
 #if CP_VLAN_TAG_USED
-       if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
+       if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
                vlan_hwaccel_receive_skb(skb, cp->vlgrp,
-                                        be16_to_cpu(desc->opts2 & 0xffff));
+                                        swab16(le32_to_cpu(desc->opts2) & 0xffff));
        } else
 #endif
                netif_receive_skb(skb);
@@ -477,17 +475,17 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                printk (KERN_DEBUG
                        "%s: rx err, slot %d status 0x%x len %d\n",
                        cp->dev->name, rx_tail, status, len);
-       cp->net_stats.rx_errors++;
+       cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
-               cp->net_stats.rx_frame_errors++;
+               cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
-               cp->net_stats.rx_crc_errors++;
+               cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
-               cp->net_stats.rx_length_errors++;
+               cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
-               cp->net_stats.rx_length_errors++;
+               cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
-               cp->net_stats.rx_fifo_errors++;
+               cp->dev->stats.rx_fifo_errors++;
 }
 
 static inline unsigned int cp_rx_csum_ok (u32 status)
@@ -539,7 +537,7 @@ rx_status_loop:
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
-                       cp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }
@@ -553,14 +551,14 @@ rx_status_loop:
                        printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
                               dev->name, rx_tail, status, len);
 
-               buflen = cp->rx_buf_sz + RX_OFFSET;
-               new_skb = dev_alloc_skb (buflen);
+               buflen = cp->rx_buf_sz + NET_IP_ALIGN;
+               new_skb = netdev_alloc_skb(dev, buflen);
                if (!new_skb) {
-                       cp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        goto rx_next;
                }
 
-               skb_reserve(new_skb, RX_OFFSET);
+               skb_reserve(new_skb, NET_IP_ALIGN);
 
                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);
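
The driver-private RX_OFFSET gives way to NET_IP_ALIGN, the generic constant for the same trick: reserve two bytes ahead of the 14-byte Ethernet header so the IP header lands on a 4-byte boundary (architectures that prefer no offset define NET_IP_ALIGN as 0), while netdev_alloc_skb() also associates the buffer with the receiving device. A sketch of that allocation pattern, assuming a buffer of cp->rx_buf_sz bytes as in the hunk above; example_alloc_rx_skb() is a hypothetical helper:

/* Sketch only: the usual receive-buffer allocation pattern. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_rx_skb(struct net_device *dev,
					    unsigned int rx_buf_sz)
{
	struct sk_buff *skb;

	/* room for the frame plus the small alignment pad */
	skb = netdev_alloc_skb(dev, rx_buf_sz + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* shift skb->data so the IP header lands on a 4-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
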
@@ -710,20 +708,20 @@ static void cp_tx (struct cp_private *cp)
                                if (netif_msg_tx_err(cp))
                                        printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
                                               cp->dev->name, status);
-                               cp->net_stats.tx_errors++;
+                               cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
-                                       cp->net_stats.tx_window_errors++;
+                                       cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
-                                       cp->net_stats.tx_aborted_errors++;
+                                       cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
-                                       cp->net_stats.tx_carrier_errors++;
+                                       cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
-                                       cp->net_stats.tx_fifo_errors++;
+                                       cp->dev->stats.tx_fifo_errors++;
                        } else {
-                               cp->net_stats.collisions +=
+                               cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
-                               cp->net_stats.tx_packets++;
-                               cp->net_stats.tx_bytes += skb->len;
+                               cp->dev->stats.tx_packets++;
+                               cp->dev->stats.tx_bytes += skb->len;
                                if (netif_msg_tx_done(cp))
                                        printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
                        }
@@ -765,7 +763,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 #if CP_VLAN_TAG_USED
        if (cp->vlgrp && vlan_tx_tag_present(skb))
-               vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
+               vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
 #endif
 
        entry = cp->tx_head;
@@ -956,7 +954,7 @@ static void cp_set_rx_mode (struct net_device *dev)
 static void __cp_get_stats(struct cp_private *cp)
 {
        /* only lower 24 bits valid; write any value to clear */
-       cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
+       cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
 }
 
@@ -971,7 +969,7 @@ static struct net_device_stats *cp_get_stats(struct net_device *dev)
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);
 
-       return &cp->net_stats;
+       return &dev->stats;
 }
 
 static void cp_stop_hw (struct cp_private *cp)
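
struct net_device already embeds a struct net_device_stats as dev->stats, so the private net_stats copy in struct cp_private is redundant: every counter update moves to dev->stats, and cp_get_stats() can return the embedded structure directly, as above. A sketch of that shape, with hypothetical example_* helpers standing in for the driver's own:

/* Sketch only: counting into the stats embedded in struct net_device. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_count_rx(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* no private copy to maintain; hand back the embedded structure */
	return &dev->stats;
}
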
@@ -1052,19 +1050,20 @@ static void cp_init_hw (struct cp_private *cp)
        cpw8_f(Cfg9346, Cfg9346_Lock);
 }
 
-static int cp_refill_rx (struct cp_private *cp)
+static int cp_refill_rx(struct cp_private *cp)
 {
+       struct net_device *dev = cp->dev;
        unsigned i;
 
        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;
 
-               skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
+               skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
                if (!skb)
                        goto err_out;
 
-               skb_reserve(skb, RX_OFFSET);
+               skb_reserve(skb, NET_IP_ALIGN);
 
                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1142,7 +1141,7 @@ static void cp_clean_rings (struct cp_private *cp)
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
-                       cp->net_stats.tx_dropped++;
+                       cp->dev->stats.tx_dropped++;
                }
        }
 
@@ -1214,7 +1213,6 @@ static int cp_close (struct net_device *dev)
 
        spin_unlock_irqrestore(&cp->lock, flags);
 
-       synchronize_irq(dev->irq);
        free_irq(dev->irq, dev);
 
        cp_free_rings(cp);
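
The dropped synchronize_irq() appears redundant here because free_irq() itself does not return until any handler still executing on another CPU has finished.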