chelsio: convert to internal net_device_stats
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 89a6827..58f6fc0 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -330,6 +330,8 @@ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
        return max_avail_segs * (p->mtu - 40);
 }
 
+#if 0
+
 /*
  * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
  * data that can be pushed per port.
@@ -357,6 +359,8 @@ void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
        t1_sched_update_parms(sge, port, 0, 0);
 }
 
+#endif  /*  0  */
+
 
 /*
  * get_clock() implements a ns clock (see ktime_get)
@@ -986,11 +990,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port,
        for_each_possible_cpu(cpu) {
                struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
 
-               ss->rx_packets += st->rx_packets;
                ss->rx_cso_good += st->rx_cso_good;
-               ss->tx_packets += st->tx_packets;
                ss->tx_cso += st->tx_cso;
                ss->tx_tso += st->tx_tso;
+               ss->tx_need_hdrroom += st->tx_need_hdrroom;
                ss->vlan_xtract += st->vlan_xtract;
                ss->vlan_insert += st->vlan_insert;
        }
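
The hunk above drops the driver's software rx_packets/tx_packets counters from the per-CPU summation (the interface packet counts are already reported elsewhere, so the duplicated software counters go) and adds the new tx_need_hdrroom counter. The summation itself is the usual lockless per-CPU pattern; a minimal stand-alone sketch, with illustrative names rather than the real sge_port_stats layout:

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/string.h>

    struct port_stats {
            unsigned long tx_cso;           /* TX checksum offloads */
            unsigned long tx_need_hdrroom;  /* TX skbs that needed more headroom */
    };

    static struct port_stats *stats;        /* from alloc_percpu() */

    /* Fold the per-CPU counters (updated locklessly on the local CPU)
     * into a single snapshot, as t1_sge_get_port_stats() does above. */
    static void fold_stats(struct port_stats *sum)
    {
            int cpu;

            memset(sum, 0, sizeof(*sum));
            for_each_possible_cpu(cpu) {
                    const struct port_stats *st = per_cpu_ptr(stats, cpu);

                    sum->tx_cso += st->tx_cso;
                    sum->tx_need_hdrroom += st->tx_need_hdrroom;
            }
    }
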
@@ -1032,10 +1035,6 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  *     @pdev: the PCI device that received the packet
  *     @fl: the SGE free list holding the packet
  *     @len: the actual packet length, excluding any SGE padding
- *     @dma_pad: padding at beginning of buffer left by SGE DMA
- *     @skb_pad: padding to be used if the packet is copied
- *     @copy_thres: length threshold under which a packet should be copied
- *     @drop_thres: # of remaining buffers before we start dropping packets
  *
  *     Get the next packet from a free list and complete setup of the
  *     sk_buff.  If the packet is small we make a copy and recycle the
@@ -1062,7 +1061,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
                                            pci_unmap_addr(ce, dma_addr),
                                            pci_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
-               memcpy(skb->data, ce->skb->data, len);
+               skb_copy_from_linear_data(ce->skb, skb->data, len);
                pci_dma_sync_single_for_device(pdev,
                                               pci_unmap_addr(ce, dma_addr),
                                               pci_unmap_len(ce, dma_len),
@@ -1379,12 +1378,9 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
        }
        __skb_pull(skb, sizeof(*p));
 
-       skb->dev = adapter->port[p->iff].dev;
-       skb->dev->last_rx = jiffies;
        st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
-       st->rx_packets++;
 
-       skb->protocol = eth_type_trans(skb, skb->dev);
+       skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
        if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
            skb->protocol == htons(ETH_P_IP) &&
            (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
@@ -1395,20 +1391,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 
        if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
                st->vlan_xtract++;
-#ifdef CONFIG_CHELSIO_T1_NAPI
-                       vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
-                                                ntohs(p->vlan));
-#else
-                       vlan_hwaccel_rx(skb, adapter->vlan_grp,
-                                       ntohs(p->vlan));
-#endif
-       } else {
-#ifdef CONFIG_CHELSIO_T1_NAPI
+               vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+                                        ntohs(p->vlan));
+       } else
                netif_receive_skb(skb);
-#else
-               netif_rx(skb);
-#endif
-       }
 }
 
 /*
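
Several removals in this hunk are pure cleanup: the explicit skb->dev assignment is redundant because eth_type_trans() stores the receiving device in skb->dev before returning the protocol id; drivers no longer update last_rx (only the bonding driver consumes it, and it maintains the timestamp itself); and with the CONFIG_CHELSIO_T1_NAPI option gone, the receive tail collapses to the NAPI-only path. A sketch of the resulting shape, using the same era vlan_group API as the code above (parameters stand in for the driver's adapter/CPL state):

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>

    /* NAPI-only delivery tail (sketch) */
    static void rx_deliver(struct sk_buff *skb, struct net_device *dev,
                           struct vlan_group *grp, int vlan_valid, u16 vlan_tag)
    {
            skb->protocol = eth_type_trans(skb, dev);   /* also sets skb->dev */
            if (grp && vlan_valid)
                    vlan_hwaccel_receive_skb(skb, grp, vlan_tag);
            else
                    netif_receive_skb(skb);
    }
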
@@ -1567,7 +1553,6 @@ static inline int responses_pending(const struct adapter *adapter)
        return (e->GenerationBit == Q->genbit);
 }
 
-#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses.  Such responses are too light-weight to justify
@@ -1621,28 +1606,19 @@ static int process_pure_responses(struct adapter *adapter)
  * or protection from interrupts as data interrupts are off at this point and
  * other adapter interrupts do not interfere.
  */
-int t1_poll(struct net_device *dev, int *budget)
+int t1_poll(struct napi_struct *napi, int budget)
 {
-       struct adapter *adapter = dev->priv;
-       int work_done;
-
-       work_done = process_responses(adapter, min(*budget, dev->quota));
-       *budget -= work_done;
-       dev->quota -= work_done;
-
-       if (unlikely(responses_pending(adapter)))
-               return 1;
-
-       netif_rx_complete(dev);
-       writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-
-       return 0;
+       struct adapter *adapter = container_of(napi, struct adapter, napi);
+       int work_done = process_responses(adapter, budget);
 
+       if (likely(work_done < budget)) {
+               napi_complete(napi);
+               writel(adapter->sge->respQ.cidx,
+                      adapter->regs + A_SG_SLEEPING);
+       }
+       return work_done;
 }
 
-/*
- * NAPI version of the main interrupt handler.
- */
 irqreturn_t t1_interrupt(int irq, void *data)
 {
        struct adapter *adapter = data;
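
The t1_poll() rewrite above is the canonical conversion from the old net_device-based poll (in/out budget pointer, return 1 while work remains) to the napi_struct API (plain budget, return the work done, complete NAPI when under budget). Because the napi_struct is embedded in the adapter, container_of() recovers the driver state. A generic skeleton under those assumptions, with hypothetical my_* helpers standing in for the driver's own:

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    struct my_adapter {
            struct napi_struct napi;        /* registered with netif_napi_add() */
    };

    static int my_process_responses(struct my_adapter *a, int budget);
    static void my_reenable_irq(struct my_adapter *a);

    static int my_poll(struct napi_struct *napi, int budget)
    {
            /* recover the enclosing structure from the embedded member */
            struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
            int work_done = my_process_responses(adapter, budget);

            if (work_done < budget) {
                    napi_complete(napi);            /* leave polling mode... */
                    my_reenable_irq(adapter);       /* ...then unmask data irqs */
            }
            return work_done;       /* report work done, not leftover budget */
    }
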
@@ -1650,17 +1626,16 @@ irqreturn_t t1_interrupt(int irq, void *data)
        int handled;
 
        if (likely(responses_pending(adapter))) {
-               struct net_device *dev = sge->netdev;
-
                writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-               if (__netif_rx_schedule_prep(dev)) {
+               if (napi_schedule_prep(&adapter->napi)) {
                        if (process_pure_responses(adapter))
-                               __netif_rx_schedule(dev);
+                               __napi_schedule(&adapter->napi);
                        else {
                                /* no data, no NAPI needed */
                                writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-                               netif_poll_enable(dev); /* undo schedule_prep */
+                               /* undo schedule_prep */
+                               napi_enable(&adapter->napi);
                        }
                }
                return IRQ_HANDLED;
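
The interrupt side converts the same way: __netif_rx_schedule_prep(dev) and __netif_rx_schedule(dev) become napi_schedule_prep() and __napi_schedule() on the embedded napi_struct. Stripped of this driver's extra process_pure_responses() short-cut, the common pattern looks like this (hypothetical my_* helpers again, reusing struct my_adapter from the previous sketch):

    #include <linux/interrupt.h>

    static int my_responses_pending(struct my_adapter *a);
    static void my_ack_data_irq(struct my_adapter *a);

    static irqreturn_t my_interrupt(int irq, void *data)
    {
            struct my_adapter *adapter = data;

            if (!my_responses_pending(adapter))
                    return IRQ_NONE;        /* slow/error path handles it */

            my_ack_data_irq(adapter);       /* clear the DATA cause bit */

            /* atomically claim the right to schedule this NAPI instance */
            if (napi_schedule_prep(&adapter->napi))
                    __napi_schedule(&adapter->napi);        /* my_poll() runs soon */

            return IRQ_HANDLED;
    }
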
@@ -1676,52 +1651,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
        return IRQ_RETVAL(handled != 0);
 }
 
-#else
-/*
- * Main interrupt handler, optimized assuming that we took a 'DATA'
- * interrupt.
- *
- * 1. Clear the interrupt
- * 2. Loop while we find valid descriptors and process them; accumulate
- *      information that can be processed after the loop
- * 3. Tell the SGE at which index we stopped processing descriptors
- * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
- *      outstanding TX buffers waiting, replenish RX buffers, potentially
- *      reenable upper layers if they were turned off due to lack of TX
- *      resources which are available again.
- * 5. If we took an interrupt, but no valid respQ descriptors was found we
- *      let the slow_intr_handler run and do error handling.
- */
-irqreturn_t t1_interrupt(int irq, void *cookie)
-{
-       int work_done;
-       struct adapter *adapter = cookie;
-
-       spin_lock(&adapter->async_lock);
-
-       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
-       if (likely(responses_pending(adapter)))
-               work_done = process_responses(adapter, -1);
-       else
-               work_done = t1_slow_intr_handler(adapter);
-
-       /*
-        * The unconditional clearing of the PL_CAUSE above may have raced
-        * with DMA completion and the corresponding generation of a response
-        * to cause us to miss the resulting data interrupt.  The next write
-        * is also unconditional to recover the missed interrupt and render
-        * this race harmless.
-        */
-       writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
-
-       if (!work_done)
-               adapter->sge->stats.unhandled_irqs++;
-       spin_unlock(&adapter->async_lock);
-       return IRQ_RETVAL(work_done != 0);
-}
-#endif
-
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
  *
@@ -1849,9 +1778,10 @@ static inline int eth_hdr_len(const void *data)
  */
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct adapter *adapter = dev->priv;
+       struct adapter *adapter = dev->ml_priv;
        struct sge *sge = adapter->sge;
-       struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
+       struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
+                                               smp_processor_id());
        struct cpl_tx_pkt *cpl;
        struct sk_buff *orig_skb = skb;
        int ret;
@@ -1859,20 +1789,32 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (skb->protocol == htons(ETH_P_CPL5))
                goto send;
 
+       /*
+        * We are using a non-standard hard_header_len.
+        * Allocate more header room in the rare cases it is not big enough.
+        */
+       if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+               skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
+               ++st->tx_need_hdrroom;
+               dev_kfree_skb_any(orig_skb);
+               if (!skb)
+                       return NETDEV_TX_OK;
+       }
+
        if (skb_shinfo(skb)->gso_size) {
                int eth_type;
                struct cpl_tx_pkt_lso *hdr;
 
                ++st->tx_tso;
 
-               eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+               eth_type = skb_network_offset(skb) == ETH_HLEN ?
                        CPL_ETH_II : CPL_ETH_II_VLAN;
 
                hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
                hdr->opcode = CPL_TX_PKT_LSO;
                hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
-               hdr->ip_hdr_words = skb->nh.iph->ihl;
-               hdr->tcp_hdr_words = skb->h.th->doff;
+               hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+               hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
                hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
                                                          skb_shinfo(skb)->gso_size));
                hdr->len = htonl(skb->len - sizeof(*hdr));
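
The header-pointer rewrites in this hunk follow the standard mapping introduced in 2.6.22, when the skb->nh/skb->h unions were replaced by offsets plus accessor functions:

    /* old union-based form           accessor replacement          */
    /* skb->nh.raw - skb->data   ->   skb_network_offset(skb)       */
    /* skb->nh.iph               ->   ip_hdr(skb)                   */
    /* skb->h.th                 ->   tcp_hdr(skb)                  */
    /* skb->nh.arph              ->   arp_hdr(skb)   (later hunk)   */
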
@@ -1892,27 +1834,9 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
 
-               /*
-                * We are using a non-standard hard_header_len and some kernel
-                * components, such as pktgen, do not handle it right.
-                * Complain when this happens but try to fix things up.
-                */
-               if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
-                       pr_debug("%s: headroom %d header_len %d\n", dev->name,
-                                skb_headroom(skb), dev->hard_header_len);
-
-                       if (net_ratelimit())
-                               printk(KERN_ERR "%s: inadequate headroom in "
-                                      "Tx packet\n", dev->name);
-                       skb = skb_realloc_headroom(skb, sizeof(*cpl));
-                       dev_kfree_skb_any(orig_skb);
-                       if (!skb)
-                               return NETDEV_TX_OK;
-               }
-
                if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
                    skb->ip_summed == CHECKSUM_PARTIAL &&
-                   skb->nh.iph->protocol == IPPROTO_UDP) {
+                   ip_hdr(skb)->protocol == IPPROTO_UDP) {
                        if (unlikely(skb_checksum_help(skb))) {
                                pr_debug("%s: unable to do udp checksum\n", dev->name);
                                dev_kfree_skb_any(skb);
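
skb_checksum_help() is the stock software fallback used here: when the hardware cannot checksum a protocol (UDP on this adapter), it computes and stores the checksum itself and clears the offload request. A sketch with a hypothetical capability test:

    #include <linux/skbuff.h>
    #include <linux/types.h>

    static bool hw_can_offload_csum(const struct sk_buff *skb);

    static int tx_fixup_csum(struct sk_buff *skb)
    {
            /* returns 0 on success; nonzero means drop the packet */
            if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_can_offload_csum(skb))
                    return skb_checksum_help(skb);
            return 0;
    }
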
@@ -1925,7 +1849,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 */
                if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
                        if (skb->protocol == htons(ETH_P_ARP) &&
-                           skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+                           arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
                                adapter->sge->espibug_skb[dev->if_port] = skb;
                                /* We want to re-use this skb later. We
                                 * simply bump the reference count and it
@@ -1955,7 +1879,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                cpl->vlan_valid = 0;
 
 send:
-       st->tx_packets++;
        dev->trans_start = jiffies;
        ret = t1_sge_tx(skb, adapter, 0, dev);
 
@@ -2044,8 +1967,7 @@ void t1_sge_stop(struct sge *sge)
                tx_sched_stop(sge);
 
        for (i = 0; i < MAX_NPORTS; i++)
-               if (sge->espibug_skb[i])
-                       kfree_skb(sge->espibug_skb[i]);
+               kfree_skb(sge->espibug_skb[i]);
 }
 
 /*
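
The dropped NULL test is redundant: like kfree(), kfree_skb() simply returns when handed a NULL pointer, so the unconditional call in the loop is equivalent.
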
@@ -2095,10 +2017,14 @@ static void espibug_workaround_t204(unsigned long data)
                                        0x0, 0x7, 0x43, 0x0, 0x0, 0x0
                                };
 
-                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-                                       ch_mac_addr, ETH_ALEN);
-                               memcpy(skb->data + skb->len - 10,
-                                       ch_mac_addr, ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                   sizeof(struct cpl_tx_pkt),
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                              skb->len - 10,
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
                                skb->cb[0] = 0xff;
                        }
 
@@ -2125,10 +2051,14 @@ static void espibug_workaround(unsigned long data)
                        if (!skb->cb[0]) {
                                u8 ch_mac_addr[ETH_ALEN] =
                                    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-                                      ch_mac_addr, ETH_ALEN);
-                               memcpy(skb->data + skb->len - 10, ch_mac_addr,
-                                      ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                    sizeof(struct cpl_tx_pkt),
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                              skb->len - 10,
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
                                skb->cb[0] = 0xff;
                        }
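
The memcpy()-to-helper rewrites in the two espibug hunks above are, like the RX-side helper earlier, purely mechanical; <linux/skbuff.h> defines the helper essentially as:

    static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
                                                      const int offset,
                                                      const void *from,
                                                      const unsigned int len)
    {
            memcpy(skb->data + offset, from, len);
    }

so memcpy(skb->data + off, src, n) becomes skb_copy_to_linear_data_offset(skb, off, src, n) with no change in behavior.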