ixgbe: add clean rx many routine
[safe/jmp/linux-2.6] drivers/net/ixgbe/ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k4"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
         "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598]                   = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
         board_82598 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;
        struct net_device *netdev = adapter->netdev;
        return netdev->name;
}
#endif

static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
        u32 ivar, index;

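        /* each IVAR register holds four 8-bit interrupt-cause entries:
         * the entry's upper bits select which IVAR register, its low
         * two bits select the byte lane within that register */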
        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = (int_alloc_entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
        ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 head, tail;

        /* Detect a transmit hang in hardware; this serializes the
         * check with the clearing of time_stamp and movement of eop */
        head = IXGBE_READ_REG(hw, tx_ring->head);
        tail = IXGBE_READ_REG(hw, tx_ring->tail);
        adapter->detect_tx_hung = false;
        if ((head != tail) &&
            tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        head, tail,
                        tx_ring->next_to_use, eop,
                        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)   /* for context */
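/* Each descriptor carries at most 16 KB (2^IXGBE_MAX_TXD_PWR), so on a
 * typical 4 KB-page system this works out to 1 (linear data) +
 * MAX_SKB_FRAGS + 1 (context descriptor). */

/* Head write-back: the hardware DMA-writes the consumed head index into
 * the word just past the last descriptor (presumably enabled when the
 * ring is configured), so cleanup can read it without an MMIO access. */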

#define GET_TX_HEAD_FROM_RING(ring) (\
        *(volatile u32 *) \
        ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;
        unsigned int i;
        u32 head, oldhead;
        unsigned int count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        rmb();
        head = GET_TX_HEAD_FROM_RING(tx_ring);
        head = le32_to_cpu(head);
        i = tx_ring->next_to_clean;
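        /* clean up to the write-back head, then re-sample it; descriptors
         * that completed while we were cleaning get picked up without
         * waiting for another interrupt */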
        while (1) {
                while (i != head) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        skb = tx_buffer_info->skb;

                        if (skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        count++;
                        if (count == tx_ring->count)
                                goto done_cleaning;
                }
                oldhead = head;
                rmb();
                head = GET_TX_HEAD_FROM_RING(tx_ring);
                head = le32_to_cpu(head);
                if (head == oldhead)
                        goto done_cleaning;
        } /* while (1) */

done_cleaning:
        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        DPRINTK(PROBE, INFO,
                                "tx hang %d detected, resetting adapter\n",
                                adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if ((total_packets >= tx_ring->work_limit) ||
            (count == tx_ring->count))
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return (total_packets ? true : false);
}

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;

        if (tx_ring->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

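        /* hand checksummed frames to the inet_lro engine for aggregation;
         * lro_used marks that a flush is needed when the poll routine
         * finishes, everything else goes straight up the stack */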
        if (adapter->netdev->features & NETIF_F_LRO &&
            skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (adapter->vlgrp && is_vlan)
                        lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
                                                     adapter->vlgrp, tag,
                                                     rx_desc);
                else
                        lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
                ring->lro_used = true;
        } else {
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
                        else
                                netif_receive_skb(skb);
                } else {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                        else
                                netif_rx(skb);
                }
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;
        unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary;
                         * this results in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed.
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data, bufsz,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
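                /* the hardware tail must point at the last initialized
                 * descriptor, i.e. one before next_to_use */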
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
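                        /* packet split: the header length comes from the
                         * descriptor's hdr_info field, the payload length
                         * from wb.upper.length */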
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_buffer = &rx_ring->rx_buffer_info[i];

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);

                cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
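                        /* not end-of-packet: carry the partial skb over to
                         * the next buffer and keep cleaning */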
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, netdev);
                ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
                netdev->last_rx = jiffies;

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        if (rx_ring->lro_used) {
                lro_flush_all(&rx_ring->lro_mgr);
                rx_ring->lro_used = false;
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = &adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                      adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
                else
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;

                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
                                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }

        ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
                              sizeof(struct ixgbe_q_vector);
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
                        itr_reg);
                IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
        }

        return;
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->lsc_int++;
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
                schedule_work(&adapter->watchdog_task);
        }
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring     *tx_ring;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, tx_ring);
#endif
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                ixgbe_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring  *rx_ring;
        int r_idx;
        int i;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0;  i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
        netif_rx_schedule(adapter->netdev, &q_vector->napi);

        return IRQ_HANDLED;
}

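/**
 * ixgbe_msix_clean_many - vector servicing both tx and rx rings
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/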
static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
        ixgbe_msix_clean_rx(irq, data);
        ixgbe_msix_clean_tx(irq, data);

        return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rx_ring = NULL;
        int work_done = 0;
        long r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_rx_dca(adapter, rx_ring);
#endif

        ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(adapter->netdev, napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
        }

        return work_done;
}

/**
 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
{
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_ring *rx_ring = NULL;
        int work_done = 0, i;
        long r_idx;
        u16 enable_mask = 0;

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        budget /= (q_vector->rxr_count ?: 1);
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_rx_dca(adapter, rx_ring);
#endif
                ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
                enable_mask |= rx_ring->v_idx;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* If all Rx work done, exit the polling mode */
        if ((work_done == 0) || !netif_running(netdev)) {
                netif_rx_complete(netdev, napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
                return 0;
        }

        return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
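        /* v_idx is stored as a one-hot mask so it can be written straight
         * into the EIMS/EIMC/EICS bitmask registers */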
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
        a->q_vector[v_idx].rxr_count++;
        a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].txr_idx);
        a->q_vector[v_idx].txr_count++;
        a->tx_ring[r_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        /* No mapping required if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);

                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
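        /* e.g. 16 Rx queues over 5 vectors gives 4-3-3-3-3:
         * ceil(16/5) = 4, then ceil(12/4) = 3, ceil(9/3) = 3, ... */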
        for (i = v_start; i < vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Map the Tx/Rx rings to the vectors we were allotted. */
        err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
        if (err)
                goto out;

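/* pick the per-vector handler based on which ring types the vector
 * services: rx-only, tx-only, or both */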
#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(&adapter->q_vector[vector]);
                sprintf(adapter->name[vector], "%s:v%d-%s",
                        netdev->name, vector,
                        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
                         ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  &(adapter->q_vector[vector]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
                         &(adapter->q_vector[i]));
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
out:
        return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_q_vector *q_vector = adapter->q_vector;
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
                                            tx_ring->total_packets,
                                            tx_ring->total_bytes);
        q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->rx_itr,
                                            rx_ring->total_packets,
                                            rx_ring->total_bytes);

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
                new_itr = 8000;
                break;
        default:
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
        }

        return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
         * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr) {
                /* shared interrupt alert!
                 * make sure interrupts are enabled because the read will
                 * have disabled interrupts due to EIAM */
                ixgbe_irq_enable(adapter);
                return IRQ_NONE;        /* Not our interrupt */
        }

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
        }

        return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
        }
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                err = ixgbe_request_msix_irqs(adapter);
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
                                  netdev->name, netdev);
        } else {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
                                  netdev->name, netdev);
        }

        if (err)
                DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i, q_vectors;

                q_vectors = adapter->num_msix_vectors;

                i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, netdev);

                i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
                                 &(adapter->q_vector[i]));
                }

                ixgbe_reset_q_vectors(adapter);
        } else {
                free_irq(adapter->pdev->irq, netdev);
        }
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
1420         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1421         IXGBE_WRITE_FLUSH(&adapter->hw);
1422         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1423                 int i;
1424                 for (i = 0; i < adapter->num_msix_vectors; i++)
1425                         synchronize_irq(adapter->msix_entries[i].vector);
1426         } else {
1427                 synchronize_irq(adapter->pdev->irq);
1428         }
1429 }
1430
1431 /**
1432  * ixgbe_irq_enable - Enable default interrupt generation settings
1433  * @adapter: board private structure
1434  **/
1435 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1436 {
1437         u32 mask;
1438         mask = IXGBE_EIMS_ENABLE_MASK;
1439         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1440         IXGBE_WRITE_FLUSH(&adapter->hw);
1441 }
1442
1443 /**
1444  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1445  * @adapter: board private structure
1446  **/
1447 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1448 {
1449         struct ixgbe_hw *hw = &adapter->hw;
1450
1451         IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1452                         EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
1453
1454         ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
1455         ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
1456
1457         map_vector_to_rxq(adapter, 0, 0);
1458         map_vector_to_txq(adapter, 0, 0);
1459
1460         DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
1461 }
1462
1463 /**
1464  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
1465  * @adapter: board private structure
1466  *
1467  * Configure the Tx unit of the MAC after a reset.
1468  **/
1469 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1470 {
1471         u64 tdba, tdwba;
1472         struct ixgbe_hw *hw = &adapter->hw;
1473         u32 i, j, tdlen, txctrl;
1474
1475         /* Setup the HW Tx Head and Tail descriptor pointers */
1476         for (i = 0; i < adapter->num_tx_queues; i++) {
1477                 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1478                 j = ring->reg_idx;
1479                 tdba = ring->dma;
1480                 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1481                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1482                                 (tdba & DMA_32BIT_MASK));
1483                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
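                /* set up head write-back in the dword just past the ring:
                 * the MAC DMAs its head index there, so Tx cleanup can poll
                 * memory instead of reading TDH across the PCIe bus */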
1484                 tdwba = ring->dma +
1485                         (ring->count * sizeof(union ixgbe_adv_tx_desc));
1486                 tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1487                 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
1488                 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
1489                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1490                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1491                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1492                 adapter->tx_ring[i].head = IXGBE_TDH(j);
1493                 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1494                 /* Disable Tx Head Writeback RO bit, since this hoses
1495                  * bookkeeping if things aren't delivered in order.
1496                  */
1497                 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1498                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1499                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1500         }
1501 }
1502
1503 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1504
1505 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1506 {
1507         struct ixgbe_ring *rx_ring;
1508         u32 srrctl;
1509         int queue0;
1510         unsigned long mask;
1511
1512         /* program one srrctl register per VMDq index */
1513         if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
1514                 long shift, len;
1515                 mask = (unsigned long) adapter->ring_feature[RING_F_VMDQ].mask;
1516                 len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
1517                 shift = find_first_bit(&mask, len);
1518                 queue0 = index & mask;
1519                 index = (index & mask) >> shift;
1520         /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
1521         } else {
1522                 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1523                 queue0 = index & mask;
1524                 index = index & mask;
1525         }
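        /* worked example (hypothetical values): with RDRXCTL.MVMEN set and
         * an RSS mask of 0xF, srrctl index 18 folds to queue0 = index =
         * 18 & 0xF = 2, so SRRCTL(2) is programmed from rx_ring[2] */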
1526
1527         rx_ring = &adapter->rx_ring[queue0];
1528
1529         srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1530
1531         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1532         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1533
1534         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1535                 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1536                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1537                 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1538                             IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1539                            IXGBE_SRRCTL_BSIZEHDR_MASK);
1540         } else {
1541                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1542
1543                 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1544                         srrctl |= IXGBE_RXBUFFER_2048 >>
1545                                   IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1546                 else
1547                         srrctl |= rx_ring->rx_buf_len >>
1548                                   IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1549         }
1550         IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1551 }
1552
1553 /**
1554  * ixgbe_get_skb_hdr - helper function for LRO header processing
1555  * @skb: pointer to sk_buff to be added to LRO packet
1556  * @iphdr: pointer to ip header structure
1557  * @tcph: pointer to tcp header structure
1558  * @hdr_flags: pointer to header flags
1559  * @priv: private data
1560  **/
1561 static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1562                              u64 *hdr_flags, void *priv)
1563 {
1564         union ixgbe_adv_rx_desc *rx_desc = priv;
1565
1566         /* Verify that this is a valid IPv4 TCP packet */
1567         if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1568              (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
1569                 return -1;
1570
1571         /* Set network headers */
1572         skb_reset_network_header(skb);
1573         skb_set_transport_header(skb, ip_hdrlen(skb));
1574         *iphdr = ip_hdr(skb);
1575         *tcph = tcp_hdr(skb);
1576         *hdr_flags = LRO_IPV4 | LRO_TCP;
1577         return 0;
1578 }
1579
1580 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1581                         (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
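/* e.g. with 4 KiB pages, PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3 pages;
 * the macro simply rounds a byte count up to whole pages (illustrative
 * arithmetic only) */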
1582
1583 /**
1584  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
1585  * @adapter: board private structure
1586  *
1587  * Configure the Rx unit of the MAC after a reset.
1588  **/
1589 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1590 {
1591         u64 rdba;
1592         struct ixgbe_hw *hw = &adapter->hw;
1593         struct net_device *netdev = adapter->netdev;
1594         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1595         int i, j;
1596         u32 rdlen, rxctrl, rxcsum;
1597         static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1598                           0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1599                           0x6A3E67EA, 0x14364D17, 0x3BED200D};
1600         u32 fctrl, hlreg0;
1601         u32 pages;
1602         u32 reta = 0, mrqc;
1603         u32 rdrxctl;
1604         int rx_buf_len;
1605
1606         /* Decide whether to use packet split mode or not */
1607         adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1608
1609         /* Set the RX buffer length according to the mode */
1610         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1611                 rx_buf_len = IXGBE_RX_HDR_SIZE;
1612         } else {
1613                 if (netdev->mtu <= ETH_DATA_LEN)
1614                         rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1615                 else
1616                         rx_buf_len = ALIGN(max_frame, 1024);
1617         }
1618
1619         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1620         fctrl |= IXGBE_FCTRL_BAM;
1621         fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
1622         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1623
1624         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1625         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1626                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
1627         else
1628                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
1629         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
1630
1631         pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1632
1633         rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1634         /* disable receives while setting up the descriptors */
1635         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1636         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
1637
1638         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1639          * the Base and Length of the Rx Descriptor Ring */
1640         for (i = 0; i < adapter->num_rx_queues; i++) {
1641                 rdba = adapter->rx_ring[i].dma;
1642                 j = adapter->rx_ring[i].reg_idx;
1643                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
1644                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
1645                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
1646                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
1647                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
1648                 adapter->rx_ring[i].head = IXGBE_RDH(j);
1649                 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1650                 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1651                 /* Initial LRO Settings */
1652                 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1653                 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1654                 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1655                 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1656                 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1657                         adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1658                 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1659                 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1660                 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1661
1662                 ixgbe_configure_srrctl(adapter, j);
1663         }
1664
1665         /*
1666          * For VMDq support of different descriptor types or
1667          * buffer sizes through the use of multiple SRRCTL
1668          * registers, RDRXCTL.MVMEN must be set to 1
1669          *
1670          * also, the manual doesn't mention it clearly but DCA hints
1671          * will only use queue 0's tags unless this bit is set.  Side
1672          * effects of setting this bit are only that SRRCTL must be
1673          * fully programmed [0..15]
1674          */
1675         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1676         rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1677         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1678
1679
1680         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1681                 /* Fill out redirection table */
1682                 for (i = 0, j = 0; i < 128; i++, j++) {
1683                         if (j == adapter->ring_feature[RING_F_RSS].indices)
1684                                 j = 0;
1685                         /* reta = 4-byte sliding window of
1686                          * 0x00..(indices-1)(indices-1)00..etc. */
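                        /* e.g. indices == 4 cycles the bytes 0x00, 0x11,
                         * 0x22, 0x33, spreading the 128 redirection entries
                         * across RSS queues 0-3 (illustrative values) */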
1687                         reta = (reta << 8) | (j * 0x11);
1688                         if ((i & 3) == 3)
1689                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
1690                 }
1691
1692                 /* Fill out hash function seeds */
1693                 for (i = 0; i < 10; i++)
1694                         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
1695
1696                 mrqc = IXGBE_MRQC_RSSEN
1697                     /* Perform hash on these packet types */
1698                        | IXGBE_MRQC_RSS_FIELD_IPV4
1699                        | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1700                        | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1701                        | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1702                        | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1703                        | IXGBE_MRQC_RSS_FIELD_IPV6
1704                        | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1705                        | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1706                        | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1707                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1708         }
1709
1710         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1711
1712         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1713             adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
1714                 /* Disable indicating checksum in descriptor, enables
1715                  * RSS hash */
1716                 rxcsum |= IXGBE_RXCSUM_PCSD;
1717         }
1718         if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1719                 /* Enable IPv4 payload checksum for UDP fragments
1720                  * if PCSD is not set */
1721                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1722         }
1723
1724         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1725 }
1726
1727 static void ixgbe_vlan_rx_register(struct net_device *netdev,
1728                                    struct vlan_group *grp)
1729 {
1730         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1731         u32 ctrl;
1732
1733         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1734                 ixgbe_irq_disable(adapter);
1735         adapter->vlgrp = grp;
1736
1737         if (grp) {
1738                 /* enable VLAN tag insert/strip */
1739                 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1740                 ctrl |= IXGBE_VLNCTRL_VME;
1741                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1742                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1743         }
1744
1745         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1746                 ixgbe_irq_enable(adapter);
1747 }
1748
1749 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1750 {
1751         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1752
1753         /* add VID to filter table */
1754         ixgbe_set_vfta(&adapter->hw, vid, 0, true);
1755 }
1756
1757 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1758 {
1759         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1760
1761         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1762                 ixgbe_irq_disable(adapter);
1763
1764         vlan_group_set_device(adapter->vlgrp, vid, NULL);
1765
1766         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1767                 ixgbe_irq_enable(adapter);
1768
1769         /* remove VID from filter table */
1770         ixgbe_set_vfta(&adapter->hw, vid, 0, false);
1771 }
1772
1773 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1774 {
1775         ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1776
1777         if (adapter->vlgrp) {
1778                 u16 vid;
1779                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1780                         if (!vlan_group_get_device(adapter->vlgrp, vid))
1781                                 continue;
1782                         ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
1783                 }
1784         }
1785 }
1786
1787 static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
1788 {
1789         struct dev_mc_list *mc_ptr;
1790         u8 *addr = *mc_addr_ptr;
1791         *vmdq = 0;
1792
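        /* container_of() recovers the enclosing struct dev_mc_list from the
         * bare dmi_addr pointer, and *mc_addr_ptr is advanced to the next
         * node's address (NULL at list end) so the update_*_addr_list
         * callers can walk the whole chain through this iterator */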
1793         mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1794         if (mc_ptr->next)
1795                 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1796         else
1797                 *mc_addr_ptr = NULL;
1798
1799         return addr;
1800 }
1801
1802 /**
1803  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
1804  * @netdev: network interface device structure
1805  *
1806  * The set_rx_mode entry point is called whenever the unicast/multicast
1807  * address list or the network interface flags are updated.  This routine is
1808  * responsible for configuring the hardware for proper unicast, multicast and
1809  * promiscuous mode.
1810  **/
1811 static void ixgbe_set_rx_mode(struct net_device *netdev)
1812 {
1813         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1814         struct ixgbe_hw *hw = &adapter->hw;
1815         u32 fctrl, vlnctrl;
1816         u8 *addr_list = NULL;
1817         int addr_count = 0;
1818
1819         /* Check for Promiscuous and All Multicast modes */
1820
1821         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1822         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1823
1824         if (netdev->flags & IFF_PROMISC) {
1825                 hw->addr_ctrl.user_set_promisc = 1;
1826                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1827                 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1828         } else {
1829                 if (netdev->flags & IFF_ALLMULTI) {
1830                         fctrl |= IXGBE_FCTRL_MPE;
1831                         fctrl &= ~IXGBE_FCTRL_UPE;
1832                 } else {
1833                         fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1834                 }
1835                 vlnctrl |= IXGBE_VLNCTRL_VFE;
1836                 hw->addr_ctrl.user_set_promisc = 0;
1837         }
1838
1839         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1840         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1841
1842         /* reprogram secondary unicast list */
1843         addr_count = netdev->uc_count;
1844         if (addr_count)
1845                 addr_list = netdev->uc_list->dmi_addr;
1846         ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
1847                                   ixgbe_addr_list_itr);
1848
1849         /* reprogram multicast list */
1850         addr_count = netdev->mc_count;
1851         if (addr_count)
1852                 addr_list = netdev->mc_list->dmi_addr;
1853         ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
1854                                   ixgbe_addr_list_itr);
1855 }
1856
1857 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1858 {
1859         int q_idx;
1860         struct ixgbe_q_vector *q_vector;
1861         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1862
1863         /* legacy and MSI only use one vector */
1864         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1865                 q_vectors = 1;
1866
1867         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1868                 struct napi_struct *napi;
1869                 q_vector = &adapter->q_vector[q_idx];
1870                 if (!q_vector->rxr_count)
1871                         continue;
1872                 napi = &q_vector->napi;
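                /* a vector that owns more than one Rx ring swaps in the
                 * many-ring poll routine; single-ring vectors keep the
                 * handler they were registered with */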
1873                 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
1874                     (q_vector->rxr_count > 1))
1875                         napi->poll = &ixgbe_clean_rxonly_many;
1876
1877                 napi_enable(napi);
1878         }
1879 }
1880
1881 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1882 {
1883         int q_idx;
1884         struct ixgbe_q_vector *q_vector;
1885         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1886
1887         /* legacy and MSI only use one vector */
1888         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1889                 q_vectors = 1;
1890
1891         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1892                 q_vector = &adapter->q_vector[q_idx];
1893                 if (!q_vector->rxr_count)
1894                         continue;
1895                 napi_disable(&q_vector->napi);
1896         }
1897 }
1898
1899 static void ixgbe_configure(struct ixgbe_adapter *adapter)
1900 {
1901         struct net_device *netdev = adapter->netdev;
1902         int i;
1903
1904         ixgbe_set_rx_mode(netdev);
1905
1906         ixgbe_restore_vlan(adapter);
1907
1908         ixgbe_configure_tx(adapter);
1909         ixgbe_configure_rx(adapter);
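        /* fill each Rx ring, leaving one descriptor unused so next_to_use
         * can never catch next_to_clean on a completely full ring */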
1910         for (i = 0; i < adapter->num_rx_queues; i++)
1911                 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1912                                            (adapter->rx_ring[i].count - 1));
1913 }
1914
1915 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1916 {
1917         struct net_device *netdev = adapter->netdev;
1918         struct ixgbe_hw *hw = &adapter->hw;
1919         int i, j = 0;
1920         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1921         u32 txdctl, rxdctl, mhadd;
1922         u32 gpie;
1923
1924         ixgbe_get_hw_control(adapter);
1925
1926         if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1927             (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1928                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1929                         gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1930                                 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1931                 } else {
1932                         /* MSI only */
1933                         gpie = 0;
1934                 }
1935                 /* XXX: to interrupt immediately for EICS writes, enable this */
1936                 /* gpie |= IXGBE_GPIE_EIMEN; */
1937                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1938         }
1939
1940         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1941                 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
1942                  * specifically only auto mask tx and rx interrupts */
1943                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1944         }
1945
1946         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1947         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1948                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1949                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
1950
1951                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1952         }
1953
1954         for (i = 0; i < adapter->num_tx_queues; i++) {
1955                 j = adapter->tx_ring[i].reg_idx;
1956                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1957                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1958                 txdctl |= (8 << 16);
1959                 txdctl |= IXGBE_TXDCTL_ENABLE;
1960                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1961         }
1962
1963         for (i = 0; i < adapter->num_rx_queues; i++) {
1964                 j = adapter->rx_ring[i].reg_idx;
1965                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
1966                 /* enable PTHRESH=32 descriptors (half the internal cache)
1967                  * and HTHRESH=0 descriptors (to minimize latency on fetch),
1968                  * this also removes a pesky rx_no_buffer_count increment */
1969                 rxdctl |= 0x0020;
1970                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1971                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
1972         }
1973         /* enable all receives */
1974         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1975         rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
1976         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
1977
1978         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1979                 ixgbe_configure_msix(adapter);
1980         else
1981                 ixgbe_configure_msi_and_legacy(adapter);
1982
1983         clear_bit(__IXGBE_DOWN, &adapter->state);
1984         ixgbe_napi_enable_all(adapter);
1985
1986         /* clear any pending interrupts, may auto mask */
1987         IXGBE_READ_REG(hw, IXGBE_EICR);
1988
1989         ixgbe_irq_enable(adapter);
1990
1991         /* bring the link up in the watchdog, this could race with our first
1992          * link up interrupt but shouldn't be a problem */
1993         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1994         adapter->link_check_timeout = jiffies;
1995         mod_timer(&adapter->watchdog_timer, jiffies);
1996         return 0;
1997 }
1998
1999 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2000 {
2001         WARN_ON(in_interrupt());
2002         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2003                 msleep(1);
2004         ixgbe_down(adapter);
2005         ixgbe_up(adapter);
2006         clear_bit(__IXGBE_RESETTING, &adapter->state);
2007 }
2008
2009 int ixgbe_up(struct ixgbe_adapter *adapter)
2010 {
2011         /* hardware has been reset, we need to reload some things */
2012         ixgbe_configure(adapter);
2013
2014         return ixgbe_up_complete(adapter);
2015 }
2016
2017 void ixgbe_reset(struct ixgbe_adapter *adapter)
2018 {
2019         if (ixgbe_init_hw(&adapter->hw))
2020                 DPRINTK(PROBE, ERR, "Hardware Error\n");
2021
2022         /* reprogram the RAR[0] in case user changed it. */
2023         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2024
2025 }
2026
2027 #ifdef CONFIG_PM
2028 static int ixgbe_resume(struct pci_dev *pdev)
2029 {
2030         struct net_device *netdev = pci_get_drvdata(pdev);
2031         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2032         int err;
2033
2034         pci_set_power_state(pdev, PCI_D0);
2035         pci_restore_state(pdev);
2036         err = pci_enable_device(pdev);
2037         if (err) {
2038                 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
2039                                 "suspend\n");
2040                 return err;
2041         }
2042         pci_set_master(pdev);
2043
2044         pci_enable_wake(pdev, PCI_D3hot, 0);
2045         pci_enable_wake(pdev, PCI_D3cold, 0);
2046
2047         if (netif_running(netdev)) {
2048                 err = ixgbe_request_irq(adapter);
2049                 if (err)
2050                         return err;
2051         }
2052
2053         ixgbe_reset(adapter);
2054
2055         if (netif_running(netdev))
2056                 ixgbe_up(adapter);
2057
2058         netif_device_attach(netdev);
2059
2060         return 0;
2061 }
2062 #endif
2063
2064 /**
2065  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2066  * @adapter: board private structure
2067  * @rx_ring: ring to free buffers from
2068  **/
2069 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2070                                 struct ixgbe_ring *rx_ring)
2071 {
2072         struct pci_dev *pdev = adapter->pdev;
2073         unsigned long size;
2074         unsigned int i;
2075
2076         /* Free all the Rx ring sk_buffs */
2077
2078         for (i = 0; i < rx_ring->count; i++) {
2079                 struct ixgbe_rx_buffer *rx_buffer_info;
2080
2081                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2082                 if (rx_buffer_info->dma) {
2083                         pci_unmap_single(pdev, rx_buffer_info->dma,
2084                                          rx_ring->rx_buf_len,
2085                                          PCI_DMA_FROMDEVICE);
2086                         rx_buffer_info->dma = 0;
2087                 }
2088                 if (rx_buffer_info->skb) {
2089                         dev_kfree_skb(rx_buffer_info->skb);
2090                         rx_buffer_info->skb = NULL;
2091                 }
2092                 if (!rx_buffer_info->page)
2093                         continue;
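                /* packet-split Rx hands out half-page buffers (tracked via
                 * page_offset), hence the PAGE_SIZE / 2 unmap length */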
2094                 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
2095                                PCI_DMA_FROMDEVICE);
2096                 rx_buffer_info->page_dma = 0;
2097                 put_page(rx_buffer_info->page);
2098                 rx_buffer_info->page = NULL;
2099                 rx_buffer_info->page_offset = 0;
2100         }
2101
2102         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2103         memset(rx_ring->rx_buffer_info, 0, size);
2104
2105         /* Zero out the descriptor ring */
2106         memset(rx_ring->desc, 0, rx_ring->size);
2107
2108         rx_ring->next_to_clean = 0;
2109         rx_ring->next_to_use = 0;
2110
2111         writel(0, adapter->hw.hw_addr + rx_ring->head);
2112         writel(0, adapter->hw.hw_addr + rx_ring->tail);
2113 }
2114
2115 /**
2116  * ixgbe_clean_tx_ring - Free Tx Buffers
2117  * @adapter: board private structure
2118  * @tx_ring: ring to be cleaned
2119  **/
2120 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
2121                                 struct ixgbe_ring *tx_ring)
2122 {
2123         struct ixgbe_tx_buffer *tx_buffer_info;
2124         unsigned long size;
2125         unsigned int i;
2126
2127         /* Free all the Tx ring sk_buffs */
2128
2129         for (i = 0; i < tx_ring->count; i++) {
2130                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2131                 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2132         }
2133
2134         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2135         memset(tx_ring->tx_buffer_info, 0, size);
2136
2137         /* Zero out the descriptor ring */
2138         memset(tx_ring->desc, 0, tx_ring->size);
2139
2140         tx_ring->next_to_use = 0;
2141         tx_ring->next_to_clean = 0;
2142
2143         writel(0, adapter->hw.hw_addr + tx_ring->head);
2144         writel(0, adapter->hw.hw_addr + tx_ring->tail);
2145 }
2146
2147 /**
2148  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2149  * @adapter: board private structure
2150  **/
2151 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2152 {
2153         int i;
2154
2155         for (i = 0; i < adapter->num_rx_queues; i++)
2156                 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2157 }
2158
2159 /**
2160  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2161  * @adapter: board private structure
2162  **/
2163 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2164 {
2165         int i;
2166
2167         for (i = 0; i < adapter->num_tx_queues; i++)
2168                 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2169 }
2170
2171 void ixgbe_down(struct ixgbe_adapter *adapter)
2172 {
2173         struct net_device *netdev = adapter->netdev;
2174         u32 rxctrl;
2175
2176         /* signal that we are down to the interrupt handler */
2177         set_bit(__IXGBE_DOWN, &adapter->state);
2178
2179         /* disable receives */
2180         rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2181         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2182                         rxctrl & ~IXGBE_RXCTRL_RXEN);
2183
2184         netif_tx_disable(netdev);
2185
2186         /* Tx was quiesced above at the netdev layer; flush the Rx
2187          * disable and let any in-flight DMA drain */
2189         IXGBE_WRITE_FLUSH(&adapter->hw);
2190         msleep(10);
2191
2192         ixgbe_irq_disable(adapter);
2193
2194         ixgbe_napi_disable_all(adapter);
2195         del_timer_sync(&adapter->watchdog_timer);
2196         cancel_work_sync(&adapter->watchdog_task);
2197
2198         netif_carrier_off(netdev);
2199         netif_tx_stop_all_queues(netdev);
2200
2201 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2202         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2203                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2204                 dca_remove_requester(&adapter->pdev->dev);
2205         }
2206
2207 #endif
2208         if (!pci_channel_offline(adapter->pdev))
2209                 ixgbe_reset(adapter);
2210         ixgbe_clean_all_tx_rings(adapter);
2211         ixgbe_clean_all_rx_rings(adapter);
2212
2213 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2214         /* since we reset the hardware DCA settings were cleared */
2215         if (dca_add_requester(&adapter->pdev->dev) == 0) {
2216                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2217                 /* always use CB2 mode, difference is masked
2218                  * in the CB driver */
2219                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
2220                 ixgbe_setup_dca(adapter);
2221         }
2222 #endif
2223 }
2224
2225 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
2226 {
2227         struct net_device *netdev = pci_get_drvdata(pdev);
2228         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2229 #ifdef CONFIG_PM
2230         int retval = 0;
2231 #endif
2232
2233         netif_device_detach(netdev);
2234
2235         if (netif_running(netdev)) {
2236                 ixgbe_down(adapter);
2237                 ixgbe_free_irq(adapter);
2238         }
2239
2240 #ifdef CONFIG_PM
2241         retval = pci_save_state(pdev);
2242         if (retval)
2243                 return retval;
2244 #endif
2245
2246         pci_enable_wake(pdev, PCI_D3hot, 0);
2247         pci_enable_wake(pdev, PCI_D3cold, 0);
2248
2249         ixgbe_release_hw_control(adapter);
2250
2251         pci_disable_device(pdev);
2252
2253         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2254
2255         return 0;
2256 }
2257
2258 static void ixgbe_shutdown(struct pci_dev *pdev)
2259 {
2260         ixgbe_suspend(pdev, PMSG_SUSPEND);
2261 }
2262
2263 /**
2264  * ixgbe_poll - NAPI Rx polling callback
2265  * @napi: structure for representing this polling device
2266  * @budget: how many packets driver is allowed to clean
2267  *
2268  * This function is used for legacy and MSI, NAPI mode
2269  **/
2270 static int ixgbe_poll(struct napi_struct *napi, int budget)
2271 {
2272         struct ixgbe_q_vector *q_vector = container_of(napi,
2273                                           struct ixgbe_q_vector, napi);
2274         struct ixgbe_adapter *adapter = q_vector->adapter;
2275         int tx_cleaned = 0, work_done = 0;
2276
2277 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2278         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2279                 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2280                 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2281         }
2282 #endif
2283
2284         tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
2285         ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
2286
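        /* leftover Tx work claims the whole budget so NAPI stays scheduled
         * rather than re-arming interrupts with packets still pending */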
2287         if (tx_cleaned)
2288                 work_done = budget;
2289
2290         /* If budget not fully consumed, exit the polling mode */
2291         if (work_done < budget) {
2292                 netif_rx_complete(adapter->netdev, napi);
2293                 if (adapter->itr_setting & 3)
2294                         ixgbe_set_itr(adapter);
2295                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2296                         ixgbe_irq_enable(adapter);
2297         }
2298
2299         return work_done;
2300 }
2301
2302 /**
2303  * ixgbe_tx_timeout - Respond to a Tx Hang
2304  * @netdev: network interface device structure
2305  **/
2306 static void ixgbe_tx_timeout(struct net_device *netdev)
2307 {
2308         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2309
2310         /* Do the reset outside of interrupt context */
2311         schedule_work(&adapter->reset_task);
2312 }
2313
2314 static void ixgbe_reset_task(struct work_struct *work)
2315 {
2316         struct ixgbe_adapter *adapter;
2317         adapter = container_of(work, struct ixgbe_adapter, reset_task);
2318
2319         adapter->tx_timeout_count++;
2320
2321         ixgbe_reinit_locked(adapter);
2322 }
2323
2324 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2325                                        int vectors)
2326 {
2327         int err, vector_threshold;
2328
2329         /* We'll want at least 3 (vector_threshold):
2330          * 1) TxQ[0] Cleanup
2331          * 2) RxQ[0] Cleanup
2332          * 3) Other (Link Status Change, etc.)
2333          * 4) TCP Timer (optional)
2334          */
2335         vector_threshold = MIN_MSIX_COUNT;
2336
2337         /* The more we get, the more we will assign to Tx/Rx Cleanup
2338          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2339          * Right now, we simply care about how many we'll get; we'll
2340          * set them up later while requesting irq's.
2341          */
2342         while (vectors >= vector_threshold) {
2343                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2344                                       vectors);
2345                 if (!err) /* Success in acquiring all requested vectors. */
2346                         break;
2347                 else if (err < 0)
2348                         vectors = 0; /* Nasty failure, quit now */
2349                 else /* err == number of vectors we should try again with */
2350                         vectors = err;
2351         }
2352
2353         if (vectors < vector_threshold) {
2354                 /* Can't allocate enough MSI-X interrupts?  Oh well.
2355                  * This just means we'll go with either a single MSI
2356                  * vector or fall back to legacy interrupts.
2357                  */
2358                 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2359                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2360                 kfree(adapter->msix_entries);
2361                 adapter->msix_entries = NULL;
2362                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2363                 adapter->num_tx_queues = 1;
2364                 adapter->num_rx_queues = 1;
2365         } else {
2366                 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2367                 adapter->num_msix_vectors = vectors;
2368         }
2369 }
2370
2371 static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2372 {
2373         int nrq, ntq;
2374         int feature_mask = 0, rss_i, rss_m;
2375
2376         /* Number of supported queues */
2377         switch (adapter->hw.mac.type) {
2378         case ixgbe_mac_82598EB:
2379                 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2380                 rss_m = 0;
2381                 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2382
2383                 switch (adapter->flags & feature_mask) {
2384                 case (IXGBE_FLAG_RSS_ENABLED):
2385                         rss_m = 0xF;
2386                         nrq = rss_i;
2387                         ntq = rss_i;
2388                         break;
2389                 case 0:
2390                 default:
2391                         rss_i = 0;
2392                         rss_m = 0;
2393                         nrq = 1;
2394                         ntq = 1;
2395                         break;
2396                 }
2397
2398                 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2399                 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2400                 break;
2401         default:
2402                 nrq = 1;
2403                 ntq = 1;
2404                 break;
2405         }
2406
2407         adapter->num_rx_queues = nrq;
2408         adapter->num_tx_queues = ntq;
2409 }
2410
2411 /**
2412  * ixgbe_cache_ring_register - Descriptor ring to register mapping
2413  * @adapter: board private structure to initialize
2414  *
2415  * Once we know the feature-set enabled for the device, we'll cache
2416  * the register offset the descriptor ring is assigned to.
2417  **/
2418 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2419 {
2420         /* TODO: Remove all uses of the indices in the cases where multiple
2421          *       features are OR'd together, if the feature set makes sense.
2422          */
2423         int feature_mask = 0, rss_i;
2424         int i, txr_idx, rxr_idx;
2425
2426         /* Number of supported queues */
2427         switch (adapter->hw.mac.type) {
2428         case ixgbe_mac_82598EB:
2429                 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2430                 txr_idx = 0;
2431                 rxr_idx = 0;
2432                 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2433                 switch (adapter->flags & feature_mask) {
2434                 case (IXGBE_FLAG_RSS_ENABLED):
2435                         for (i = 0; i < adapter->num_rx_queues; i++)
2436                                 adapter->rx_ring[i].reg_idx = i;
2437                         for (i = 0; i < adapter->num_tx_queues; i++)
2438                                 adapter->tx_ring[i].reg_idx = i;
2439                         break;
2440                 case 0:
2441                 default:
2442                         break;
2443                 }
2444                 break;
2445         default:
2446                 break;
2447         }
2448 }
2449
2450 /**
2451  * ixgbe_alloc_queues - Allocate memory for all rings
2452  * @adapter: board private structure to initialize
2453  *
2454  * We allocate one ring per queue at run-time since we don't know the
2455  * number of queues at compile-time.
2457  **/
2458 static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2459 {
2460         int i;
2461
2462         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2463                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
2464         if (!adapter->tx_ring)
2465                 goto err_tx_ring_allocation;
2466
2467         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2468                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
2469         if (!adapter->rx_ring)
2470                 goto err_rx_ring_allocation;
2471
2472         for (i = 0; i < adapter->num_tx_queues; i++) {
2473                 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
2474                 adapter->tx_ring[i].queue_index = i;
2475         }
2476         for (i = 0; i < adapter->num_rx_queues; i++) {
2477                 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
2478                 adapter->rx_ring[i].queue_index = i;
2479         }
2480
2481         ixgbe_cache_ring_register(adapter);
2482
2483         return 0;
2484
2485 err_rx_ring_allocation:
2486         kfree(adapter->tx_ring);
2487 err_tx_ring_allocation:
2488         return -ENOMEM;
2489 }
2490
2491 /**
2492  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2493  * @adapter: board private structure to initialize
2494  *
2495  * Attempt to configure the interrupts using the best available
2496  * capabilities of the hardware and the kernel.
2497  **/
2498 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2499                                                     *adapter)
2500 {
2501         int err = 0;
2502         int vector, v_budget;
2503
2504         /*
2505          * It's easy to be greedy for MSI-X vectors, but it really
2506          * doesn't do us much good if we have a lot more vectors
2507          * than CPU's.  So let's be conservative and only ask for
2508          * (roughly) twice the number of vectors as there are CPU's.
2509          */
2510         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2511                        (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2512
2513         /*
2514          * At the same time, hardware can only support a maximum of
2515          * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
2516          * we can easily reach upwards of 64 Rx descriptor queues and
2517          * 32 Tx queues.  Thus, we cap it off in those rare cases where
2518          * the cpu count also exceeds our vector limit.
2519          */
2520         v_budget = min(v_budget, MAX_MSIX_COUNT);
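        /* hypothetical sizing: with 8 online CPUs and 16 Rx + 16 Tx queues,
         * v_budget = min(32, 16) + NON_Q_VECTORS, i.e. 16 queue vectors plus
         * the non-queue (link, etc.) vectors, then clamped as above */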
2521
2522         /* A failure in MSI-X entry allocation isn't fatal, but it does
2523          * mean we disable MSI-X capabilities of the adapter. */
2524         adapter->msix_entries = kcalloc(v_budget,
2525                                         sizeof(struct msix_entry), GFP_KERNEL);
2526         if (!adapter->msix_entries) {
2527                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2528                 ixgbe_set_num_queues(adapter);
2529                 kfree(adapter->tx_ring);
2530                 kfree(adapter->rx_ring);
2531                 err = ixgbe_alloc_queues(adapter);
2532                 if (err) {
2533                         DPRINTK(PROBE, ERR, "Unable to allocate memory "
2534                                             "for queues\n");
2535                         goto out;
2536                 }
2537
2538                 goto try_msi;
2539         }
2540
2541         for (vector = 0; vector < v_budget; vector++)
2542                 adapter->msix_entries[vector].entry = vector;
2543
2544         ixgbe_acquire_msix_vectors(adapter, v_budget);
2545
2546         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2547                 goto out;
2548
2549 try_msi:
2550         err = pci_enable_msi(adapter->pdev);
2551         if (!err) {
2552                 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2553         } else {
2554                 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2555                                    "falling back to legacy.  Error: %d\n", err);
2556                 /* reset err */
2557                 err = 0;
2558         }
2559
2560 out:
2561         /* Notify the stack of the (possibly) reduced Tx Queue count. */
2562         adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
2563
2564         return err;
2565 }
2566
2567 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2568 {
2569         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2570                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2571                 pci_disable_msix(adapter->pdev);
2572                 kfree(adapter->msix_entries);
2573                 adapter->msix_entries = NULL;
2574         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2575                 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2576                 pci_disable_msi(adapter->pdev);
2577         }
2579 }
2580
2581 /**
2582  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2583  * @adapter: board private structure to initialize
2584  *
2585  * We determine which interrupt scheme to use based on...
2586  * - Kernel support (MSI, MSI-X)
2587  *   - which can be user-defined (via MODULE_PARAM)
2588  * - Hardware queue count (num_*_queues)
2589  *   - defined by miscellaneous hardware support/features (RSS, etc.)
2590  **/
2591 static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2592 {
2593         int err;
2594
2595         /* Number of supported queues */
2596         ixgbe_set_num_queues(adapter);
2597
2598         err = ixgbe_alloc_queues(adapter);
2599         if (err) {
2600                 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2601                 goto err_alloc_queues;
2602         }
2603
2604         err = ixgbe_set_interrupt_capability(adapter);
2605         if (err) {
2606                 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2607                 goto err_set_interrupt;
2608         }
2609
2610         DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2611                            "Tx Queue count = %u\n",
2612                 (adapter->num_rx_queues > 1) ? "Enabled" :
2613                 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2614
2615         set_bit(__IXGBE_DOWN, &adapter->state);
2616
2617         return 0;
2618
2619 err_set_interrupt:
2620         kfree(adapter->tx_ring);
2621         kfree(adapter->rx_ring);
2622 err_alloc_queues:
2623         return err;
2624 }
2625
2626 /**
2627  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2628  * @adapter: board private structure to initialize
2629  *
2630  * ixgbe_sw_init initializes the Adapter private data structure.
2631  * Fields are initialized based on PCI device information and
2632  * OS network device settings (MTU size).
2633  **/
2634 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2635 {
2636         struct ixgbe_hw *hw = &adapter->hw;
2637         struct pci_dev *pdev = adapter->pdev;
2638         unsigned int rss;
2639
2640         /* Set capability flags */
2641         rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2642         adapter->ring_feature[RING_F_RSS].indices = rss;
2643         adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2644
2645         /* default flow control settings */
2646         hw->fc.original_type = ixgbe_fc_none;
2647         hw->fc.type = ixgbe_fc_none;
2648         hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2649         hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2650         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2651         hw->fc.send_xon = true;
2652
2653         /* select 10G link by default */
2654         hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2655         if (hw->mac.ops.reset(hw)) {
2656                 dev_err(&pdev->dev, "HW Init failed\n");
2657                 return -EIO;
2658         }
2659         if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
2660                                          false)) {
2661                 dev_err(&pdev->dev, "Link Speed setup failed\n");
2662                 return -EIO;
2663         }
2664
2665         /* enable itr by default in dynamic mode */
2666         adapter->itr_setting = 1;
2667         adapter->eitr_param = 20000;
2668
2669         /* set default eitr throughput thresholds (in MB/s) */
2670         adapter->eitr_low = 10;
2671         adapter->eitr_high = 20;
2672
2673         /* set default ring sizes */
2674         adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
2675         adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
2676
2677         /* initialize eeprom parameters */
2678         if (ixgbe_init_eeprom(hw)) {
2679                 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2680                 return -EIO;
2681         }
2682
2683         /* enable rx csum by default */
2684         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2685
2686         set_bit(__IXGBE_DOWN, &adapter->state);
2687
2688         return 0;
2689 }
2690
2691 /**
2692  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2693  * @adapter: board private structure
2694  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2695  *
2696  * Return 0 on success, negative on failure
2697  **/
2698 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2699                              struct ixgbe_ring *tx_ring)
2700 {
2701         struct pci_dev *pdev = adapter->pdev;
2702         int size;
2703
2704         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2705         tx_ring->tx_buffer_info = vmalloc(size);
2706         if (!tx_ring->tx_buffer_info)
2707                 goto err;
2708         memset(tx_ring->tx_buffer_info, 0, size);
2709
2710         /* round up to nearest 4K */
2711         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
2712                         sizeof(u32);
2713         tx_ring->size = ALIGN(tx_ring->size, 4096);
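        /* e.g. a hypothetical 512-descriptor ring: 512 * 16-byte advanced
         * descriptors + 4 bytes of head write-back space = 8196, rounded up
         * to 12288, i.e. three 4 KiB pages */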
2714
2715         tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2716                                              &tx_ring->dma);
2717         if (!tx_ring->desc)
2718                 goto err;
2719
2720         tx_ring->next_to_use = 0;
2721         tx_ring->next_to_clean = 0;
2722         tx_ring->work_limit = tx_ring->count;
2723         return 0;
2724
2725 err:
2726         vfree(tx_ring->tx_buffer_info);
2727         tx_ring->tx_buffer_info = NULL;
2728         DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
2729                             "descriptor ring\n");
2730         return -ENOMEM;
2731 }
2732
2733 /**
2734  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2735  * @adapter: board private structure
2736  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2737  *
2738  * Returns 0 on success, negative on failure
2739  **/
2740 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2741                              struct ixgbe_ring *rx_ring)
2742 {
2743         struct pci_dev *pdev = adapter->pdev;
2744         int size;
2745
2746         size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2747         rx_ring->lro_mgr.lro_arr = vmalloc(size);
2748         if (!rx_ring->lro_mgr.lro_arr)
2749                 return -ENOMEM;
2750         memset(rx_ring->lro_mgr.lro_arr, 0, size);
2751
2752         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2753         rx_ring->rx_buffer_info = vmalloc(size);
2754         if (!rx_ring->rx_buffer_info) {
2755                 DPRINTK(PROBE, ERR,
2756                         "vmalloc allocation failed for the rx desc ring\n");
2757                 goto alloc_failed;
2758         }
2759         memset(rx_ring->rx_buffer_info, 0, size);
2760
2761         /* Round up to nearest 4K */
2762         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2763         rx_ring->size = ALIGN(rx_ring->size, 4096);
2764
2765         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
2766
2767         if (!rx_ring->desc) {
2768                 DPRINTK(PROBE, ERR,
2769                         "Memory allocation failed for the rx desc ring\n");
2770                 vfree(rx_ring->rx_buffer_info);
2771                 goto alloc_failed;
2772         }
2773
2774         rx_ring->next_to_clean = 0;
2775         rx_ring->next_to_use = 0;
2776
2777         return 0;
2778
2779 alloc_failed:
2780         vfree(rx_ring->lro_mgr.lro_arr);
2781         rx_ring->lro_mgr.lro_arr = NULL;
2782         return -ENOMEM;
2783 }
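
/*
 * Minimal sketch of the allocate-then-unwind idiom used above
 * (illustrative only; hypothetical names): a later allocation that fails
 * must release every earlier one before returning, so the caller never
 * sees a half-built ring.
 */
static int ixgbe_example_two_stage_alloc(void **a, void **b,
                                         size_t a_size, size_t b_size)
{
        *a = vmalloc(a_size);
        if (!*a)
                return -ENOMEM;
        *b = vmalloc(b_size);
        if (!*b) {
                vfree(*a);              /* unwind the first allocation */
                *a = NULL;
                return -ENOMEM;
        }
        memset(*a, 0, a_size);
        memset(*b, 0, b_size);
        return 0;
}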
2784
2785 /**
2786  * ixgbe_free_tx_resources - Free Tx Resources per Queue
2787  * @adapter: board private structure
2788  * @tx_ring: Tx descriptor ring for a specific queue
2789  *
2790  * Free all transmit software resources
2791  **/
2792 void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2793                              struct ixgbe_ring *tx_ring)
2794 {
2795         struct pci_dev *pdev = adapter->pdev;
2796
2797         ixgbe_clean_tx_ring(adapter, tx_ring);
2798
2799         vfree(tx_ring->tx_buffer_info);
2800         tx_ring->tx_buffer_info = NULL;
2801
2802         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2803
2804         tx_ring->desc = NULL;
2805 }
2806
2807 /**
2808  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
2809  * @adapter: board private structure
2810  *
2811  * Free all transmit software resources
2812  **/
2813 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2814 {
2815         int i;
2816
2817         for (i = 0; i < adapter->num_tx_queues; i++)
2818                 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
2819 }
2820
2821 /**
2822  * ixgbe_free_rx_resources - Free Rx Resources
2823  * @adapter: board private structure
2824  * @rx_ring: ring to clean the resources from
2825  *
2826  * Free all receive software resources
2827  **/
2828 void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2829                              struct ixgbe_ring *rx_ring)
2830 {
2831         struct pci_dev *pdev = adapter->pdev;
2832
2833         vfree(rx_ring->lro_mgr.lro_arr);
2834         rx_ring->lro_mgr.lro_arr = NULL;
2835
2836         ixgbe_clean_rx_ring(adapter, rx_ring);
2837
2838         vfree(rx_ring->rx_buffer_info);
2839         rx_ring->rx_buffer_info = NULL;
2840
2841         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2842
2843         rx_ring->desc = NULL;
2844 }
2845
2846 /**
2847  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
2848  * @adapter: board private structure
2849  *
2850  * Free all receive software resources
2851  **/
2852 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2853 {
2854         int i;
2855
2856         for (i = 0; i < adapter->num_rx_queues; i++)
2857                 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
2858 }
2859
2860 /**
2861  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2862  * @adapter: board private structure
2863  *
2864  * If this function returns with an error, then it's possible one or
2865  * more of the rings is populated (while the rest are not).  It is the
2866  * caller's duty to clean those orphaned rings.
2867  *
2868  * Return 0 on success, negative on failure
2869  **/
2870 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2871 {
2872         int i, err = 0;
2873
2874         for (i = 0; i < adapter->num_tx_queues; i++) {
2875                 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2876                 if (err) {
2877                         DPRINTK(PROBE, ERR,
2878                                 "Allocation for Tx Queue %u failed\n", i);
2879                         break;
2880                 }
2881         }
2882
2883         return err;
2884 }
2885
2886 /**
2887  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2888  * @adapter: board private structure
2889  *
2890  * If this function returns with an error, then it's possible one or
2891  * more of the rings is populated (while the rest are not).  It is the
2892  * caller's duty to clean those orphaned rings.
2893  *
2894  * Return 0 on success, negative on failure
2895  **/
2897 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2898 {
2899         int i, err = 0;
2900
2901         for (i = 0; i < adapter->num_rx_queues; i++) {
2902                 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2903                 if (err) {
2904                         DPRINTK(PROBE, ERR,
2905                                 "Allocation for Rx Queue %u failed\n", i);
2906                         break;
2907                 }
2908         }
2909
2910         return err;
2911 }
2912
2913 /**
2914  * ixgbe_change_mtu - Change the Maximum Transfer Unit
2915  * @netdev: network interface device structure
2916  * @new_mtu: new value for maximum frame size
2917  *
2918  * Returns 0 on success, negative on failure
2919  **/
2920 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2921 {
2922         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2923         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2924
2925         /* MTU < 68 is an error and causes problems on some kernels */
2926         if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
2927                 return -EINVAL;
2928
2929         DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2930                 netdev->mtu, new_mtu);
2931         /* must set new MTU before calling down or up */
2932         netdev->mtu = new_mtu;
2933
2934         if (netif_running(netdev))
2935                 ixgbe_reinit_locked(adapter);
2936
2937         return 0;
2938 }
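
/*
 * Worked example of the frame-size check above (illustrative): a
 * hypothetical jumbo MTU of 9000 implies a wire frame of
 * 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 9018 bytes, which must not
 * exceed IXGBE_MAX_JUMBO_FRAME_SIZE.
 */
static inline bool ixgbe_example_mtu_valid(int new_mtu)
{
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        return new_mtu >= 68 && max_frame <= IXGBE_MAX_JUMBO_FRAME_SIZE;
}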
2939
2940 /**
2941  * ixgbe_open - Called when a network interface is made active
2942  * @netdev: network interface device structure
2943  *
2944  * Returns 0 on success, negative value on failure
2945  *
2946  * The open entry point is called when a network interface is made
2947  * active by the system (IFF_UP).  At this point all resources needed
2948  * for transmit and receive operations are allocated, the interrupt
2949  * handler is registered with the OS, the watchdog timer is started,
2950  * and the stack is notified that the interface is ready.
2951  **/
2952 static int ixgbe_open(struct net_device *netdev)
2953 {
2954         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2955         int err;
2956
2957         /* disallow open during test */
2958         if (test_bit(__IXGBE_TESTING, &adapter->state))
2959                 return -EBUSY;
2960
2961         /* allocate transmit descriptors */
2962         err = ixgbe_setup_all_tx_resources(adapter);
2963         if (err)
2964                 goto err_setup_tx;
2965
2966         /* allocate receive descriptors */
2967         err = ixgbe_setup_all_rx_resources(adapter);
2968         if (err)
2969                 goto err_setup_rx;
2970
2971         ixgbe_configure(adapter);
2972
2973         err = ixgbe_request_irq(adapter);
2974         if (err)
2975                 goto err_req_irq;
2976
2977         err = ixgbe_up_complete(adapter);
2978         if (err)
2979                 goto err_up;
2980
2981         netif_tx_start_all_queues(netdev);
2982
2983         return 0;
2984
2985 err_up:
2986         ixgbe_release_hw_control(adapter);
2987         ixgbe_free_irq(adapter);
2988 err_req_irq:
2989         ixgbe_free_all_rx_resources(adapter);
2990 err_setup_rx:
2991         ixgbe_free_all_tx_resources(adapter);
2992 err_setup_tx:
2993         ixgbe_reset(adapter);
2994
2995         return err;
2996 }
2997
2998 /**
2999  * ixgbe_close - Disables a network interface
3000  * @netdev: network interface device structure
3001  *
3002  * Returns 0, this is not allowed to fail
3003  *
3004  * The close entry point is called when an interface is de-activated
3005  * by the OS.  The hardware is still under the driver's control, but
3006  * needs to be disabled.  A global MAC reset is issued to stop the
3007  * hardware, and all transmit and receive resources are freed.
3008  **/
3009 static int ixgbe_close(struct net_device *netdev)
3010 {
3011         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3012
3013         ixgbe_down(adapter);
3014         ixgbe_free_irq(adapter);
3015
3016         ixgbe_free_all_tx_resources(adapter);
3017         ixgbe_free_all_rx_resources(adapter);
3018
3019         ixgbe_release_hw_control(adapter);
3020
3021         return 0;
3022 }
3023
3024 /**
3025  * ixgbe_update_stats - Update the board statistics counters.
3026  * @adapter: board private structure
3027  **/
3028 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3029 {
3030         struct ixgbe_hw *hw = &adapter->hw;
3031         u64 total_mpc = 0;
3032         u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
3033
3034         adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3035         for (i = 0; i < 8; i++) {
3036                 /* for packet buffers not used, the register should read 0 */
3037                 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3038                 missed_rx += mpc;
3039                 adapter->stats.mpc[i] += mpc;
3040                 total_mpc += adapter->stats.mpc[i];
3041                 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3042         }
3043         adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3044         /* work around hardware counting issue */
3045         adapter->stats.gprc -= missed_rx;
3046
3047         /* 82598 hardware only has a 32 bit counter in the high register */
3048         adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3049         adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3050         adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3051         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3052         adapter->stats.bprc += bprc;
3053         adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3054         adapter->stats.mprc -= bprc;
3055         adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3056         adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3057         adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3058         adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3059         adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3060         adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3061         adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3062         adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3063         adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3064         adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3065         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3066         adapter->stats.lxontxc += lxon;
3067         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3068         adapter->stats.lxofftxc += lxoff;
3069         adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3070         adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3071         adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3072         /*
3073          * 82598 errata - tx of flow control packets is included in tx counters
3074          */
3075         xon_off_tot = lxon + lxoff;
3076         adapter->stats.gptc -= xon_off_tot;
3077         adapter->stats.mptc -= xon_off_tot;
3078         adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
3080         adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3081         adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3082         adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3083         adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3084         adapter->stats.ptc64 -= xon_off_tot;
3085         adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3086         adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3087         adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3088         adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3089         adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3090         adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3091
3092         /* Fill out the OS statistics structure */
3093         adapter->net_stats.multicast = adapter->stats.mprc;
3094
3095         /* Rx Errors */
3096         adapter->net_stats.rx_errors = adapter->stats.crcerrs +
3097                                                 adapter->stats.rlec;
3098         adapter->net_stats.rx_dropped = 0;
3099         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3100         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3101         adapter->net_stats.rx_missed_errors = total_mpc;
3102 }
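
/*
 * Worked example of the 82598 flow-control errata adjustment above
 * (illustrative): each XON/XOFF pause frame is a minimum-size frame of
 * ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes, so e.g. 100 transmitted
 * pause frames inflate the good-octets counter by 6400 bytes, which the
 * code subtracts back out.
 */
static inline u64 ixgbe_example_pause_octets(u32 xon_off_tot)
{
        return (u64)xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN);
}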
3103
3104 /**
3105  * ixgbe_watchdog - Timer Call-back
3106  * @data: pointer to adapter cast into an unsigned long
3107  **/
3108 static void ixgbe_watchdog(unsigned long data)
3109 {
3110         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
3111         struct ixgbe_hw *hw = &adapter->hw;
3112
3113         /* Do the watchdog outside of interrupt context due to the lovely
3114          * delays that some of the newer hardware requires */
3115         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
3116                 /* Cause software interrupt to ensure rx rings are cleaned */
3117                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3118                         u32 eics =
3119                          (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
3120                         IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
3121                 } else {
3122                         /* For legacy and MSI interrupts don't set any bits that
3123                          * are enabled for EIAM, because this operation would
3124                          * set *both* EIMS and EICS for any bit in EIAM */
3125                         IXGBE_WRITE_REG(hw, IXGBE_EICS,
3126                                     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
3127                 }
3128                 /* Reset the timer */
3129                 mod_timer(&adapter->watchdog_timer,
3130                           round_jiffies(jiffies + 2 * HZ));
3131         }
3132
3133         schedule_work(&adapter->watchdog_task);
3134 }
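
/*
 * Worked example of the EICS mask above (illustrative): with a
 * hypothetical 10 MSI-X vectors of which 2 are non-queue vectors,
 * (1 << 8) - 1 = 0xff raises a software interrupt on queue vectors 0-7
 * with a single register write.
 */
static inline u32 ixgbe_example_eics_mask(int num_msix, int non_q_vectors)
{
        return (1 << (num_msix - non_q_vectors)) - 1;
}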
3135
3136 /**
3137  *  ixgbe_watchdog_task - worker thread to bring link up
3138  *  @work: pointer to work_struct containing our data
3139  **/
3140 static void ixgbe_watchdog_task(struct work_struct *work)
3141 {
3142         struct ixgbe_adapter *adapter = container_of(work,
3143                                                      struct ixgbe_adapter,
3144                                                      watchdog_task);
3145         struct net_device *netdev = adapter->netdev;
3146         struct ixgbe_hw *hw = &adapter->hw;
3147         u32 link_speed = adapter->link_speed;
3148         bool link_up = adapter->link_up;
3149
3150         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
3151
3152         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3153                 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3154                 if (link_up ||
3155                     time_after(jiffies, (adapter->link_check_timeout +
3156                                          IXGBE_TRY_LINK_TIMEOUT))) {
3157                         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
3158                         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3159                 }
3160                 adapter->link_up = link_up;
3161                 adapter->link_speed = link_speed;
3162         }
3163
3164         if (link_up) {
3165                 if (!netif_carrier_ok(netdev)) {
3166                         u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3167                         u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
3168 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
3169 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
3170                         DPRINTK(LINK, INFO, "NIC Link is Up %s, "
3171                                 "Flow Control: %s\n",
3172                                 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
3173                                  "10 Gbps" :
3174                                  (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
3175                                   "1 Gbps" : "unknown speed")),
3176                                 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
3177                                  (FLOW_RX ? "RX" :
3178                                  (FLOW_TX ? "TX" : "None"))));
3179
3180                         netif_carrier_on(netdev);
3181                         netif_tx_wake_all_queues(netdev);
3182                 } else {
3183                         /* Force detection of hung controller */
3184                         adapter->detect_tx_hung = true;
3185                 }
3186         } else {
3187                 adapter->link_up = false;
3188                 adapter->link_speed = 0;
3189                 if (netif_carrier_ok(netdev)) {
3190                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
3191                         netif_carrier_off(netdev);
3192                         netif_tx_stop_all_queues(netdev);
3193                 }
3194         }
3195
3196         ixgbe_update_stats(adapter);
3197         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
3198 }
3199
3200 static int ixgbe_tso(struct ixgbe_adapter *adapter,
3201                          struct ixgbe_ring *tx_ring, struct sk_buff *skb,
3202                          u32 tx_flags, u8 *hdr_len)
3203 {
3204         struct ixgbe_adv_tx_context_desc *context_desc;
3205         unsigned int i;
3206         int err;
3207         struct ixgbe_tx_buffer *tx_buffer_info;
3208         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3209         u32 mss_l4len_idx = 0, l4len;
3210
3211         if (skb_is_gso(skb)) {
3212                 if (skb_header_cloned(skb)) {
3213                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3214                         if (err)
3215                                 return err;
3216                 }
3217                 l4len = tcp_hdrlen(skb);
3218                 *hdr_len += l4len;
3219
3220                 if (skb->protocol == htons(ETH_P_IP)) {
3221                         struct iphdr *iph = ip_hdr(skb);
3222                         iph->tot_len = 0;
3223                         iph->check = 0;
3224                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3225                                                                  iph->daddr, 0,
3226                                                                  IPPROTO_TCP,
3227                                                                  0);
3228                         adapter->hw_tso_ctxt++;
3229                 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3230                         ipv6_hdr(skb)->payload_len = 0;
3231                         tcp_hdr(skb)->check =
3232                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3233                                              &ipv6_hdr(skb)->daddr,
3234                                              0, IPPROTO_TCP, 0);
3235                         adapter->hw_tso6_ctxt++;
3236                 }
3237
3238                 i = tx_ring->next_to_use;
3239
3240                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3241                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3242
3243                 /* VLAN MACLEN IPLEN */
3244                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3245                         vlan_macip_lens |=
3246                             (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3247                 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3248                                     IXGBE_ADVTXD_MACLEN_SHIFT);
3249                 *hdr_len += skb_network_offset(skb);
3250                 vlan_macip_lens |=
3251                     (skb_transport_header(skb) - skb_network_header(skb));
3252                 *hdr_len +=
3253                     (skb_transport_header(skb) - skb_network_header(skb));
3254                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3255                 context_desc->seqnum_seed = 0;
3256
3257                 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3258                 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3259                                     IXGBE_ADVTXD_DTYP_CTXT);
3260
3261                 if (skb->protocol == htons(ETH_P_IP))
3262                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3263                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3264                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3265
3266                 /* MSS L4LEN IDX */
3267                 mss_l4len_idx |=
3268                     (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3269                 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
3270                 /* use index 1 for TSO */
3271                 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3272                 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3273
3274                 tx_buffer_info->time_stamp = jiffies;
3275                 tx_buffer_info->next_to_watch = i;
3276
3277                 i++;
3278                 if (i == tx_ring->count)
3279                         i = 0;
3280                 tx_ring->next_to_use = i;
3281
3282                 return true;
3283         }
3284         return false;
3285 }
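
/*
 * Sketch of the MSS/L4LEN field packing used in ixgbe_tso() above
 * (illustrative, reusing the shift macros from the driver headers): a
 * hypothetical MSS of 1448 and a 20-byte TCP header are packed into one
 * little-endian context word together with context index 1.
 */
static inline __le32 ixgbe_example_mss_l4len(u32 mss, u32 l4len)
{
        u32 v = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
                (l4len << IXGBE_ADVTXD_L4LEN_SHIFT) |
                (1 << IXGBE_ADVTXD_IDX_SHIFT);  /* index 1 is reserved for TSO */

        return cpu_to_le32(v);
}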
3286
3287 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3288                                    struct ixgbe_ring *tx_ring,
3289                                    struct sk_buff *skb, u32 tx_flags)
3290 {
3291         struct ixgbe_adv_tx_context_desc *context_desc;
3292         unsigned int i;
3293         struct ixgbe_tx_buffer *tx_buffer_info;
3294         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3295
3296         if (skb->ip_summed == CHECKSUM_PARTIAL ||
3297             (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
3298                 i = tx_ring->next_to_use;
3299                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3300                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3301
3302                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3303                         vlan_macip_lens |=
3304                             (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3305                 vlan_macip_lens |= (skb_network_offset(skb) <<
3306                                     IXGBE_ADVTXD_MACLEN_SHIFT);
3307                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3308                         vlan_macip_lens |= (skb_transport_header(skb) -
3309                                             skb_network_header(skb));
3310
3311                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3312                 context_desc->seqnum_seed = 0;
3313
3314                 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3315                                     IXGBE_ADVTXD_DTYP_CTXT);
3316
3317                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3318                         switch (skb->protocol) {
3319                         case __constant_htons(ETH_P_IP):
3320                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3321                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3322                                         type_tucmd_mlhl |=
3323                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3324                                 break;
3325
3326                         case __constant_htons(ETH_P_IPV6):
3327                                 /* XXX what about other V6 headers?? */
3328                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3329                                         type_tucmd_mlhl |=
3330                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3331                                 break;
3332
3333                         default:
3334                                 if (unlikely(net_ratelimit())) {
3335                                         DPRINTK(PROBE, WARNING,
3336                                          "partial checksum but proto=%x!\n",
3337                                          skb->protocol);
3338                                 }
3339                                 break;
3340                         }
3341                 }
3342
3343                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3344                 /* use index zero for tx checksum offload */
3345                 context_desc->mss_l4len_idx = 0;
3346
3347                 tx_buffer_info->time_stamp = jiffies;
3348                 tx_buffer_info->next_to_watch = i;
3349                 adapter->hw_csum_tx_good++;
3350                 i++;
3351                 if (i == tx_ring->count)
3352                         i = 0;
3353                 tx_ring->next_to_use = i;
3354
3355                 return true;
3356         }
3357         return false;
3358 }
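
/*
 * Sketch of the VLAN/MACLEN/IPLEN packing used above (illustrative): the
 * MAC header length is the network-header offset, and the IP header
 * length is the distance between the network and transport headers; both
 * land in one little-endian context word.
 */
static inline __le32 ixgbe_example_vlan_macip_lens(struct sk_buff *skb)
{
        u32 v = skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;

        v |= skb_transport_header(skb) - skb_network_header(skb);
        return cpu_to_le32(v);
}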
3359
3360 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3361                         struct ixgbe_ring *tx_ring,
3362                         struct sk_buff *skb, unsigned int first)
3363 {
3364         struct ixgbe_tx_buffer *tx_buffer_info;
3365         unsigned int len = skb->len;
3366         unsigned int offset = 0, size, count = 0, i;
3367         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3368         unsigned int f;
3369
3370         len -= skb->data_len;
3371
3372         i = tx_ring->next_to_use;
3373
3374         while (len) {
3375                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3376                 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3377
3378                 tx_buffer_info->length = size;
3379                 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3380                                                   skb->data + offset,
3381                                                   size, PCI_DMA_TODEVICE);
3382                 tx_buffer_info->time_stamp = jiffies;
3383                 tx_buffer_info->next_to_watch = i;
3384
3385                 len -= size;
3386                 offset += size;
3387                 count++;
3388                 i++;
3389                 if (i == tx_ring->count)
3390                         i = 0;
3391         }
3392
3393         for (f = 0; f < nr_frags; f++) {
3394                 struct skb_frag_struct *frag;
3395
3396                 frag = &skb_shinfo(skb)->frags[f];
3397                 len = frag->size;
3398                 offset = frag->page_offset;
3399
3400                 while (len) {
3401                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
3402                         size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3403
3404                         tx_buffer_info->length = size;
3405                         tx_buffer_info->dma = pci_map_page(adapter->pdev,
3406                                                         frag->page,
3407                                                         offset,
3408                                                         size, PCI_DMA_TODEVICE);
3409                         tx_buffer_info->time_stamp = jiffies;
3410                         tx_buffer_info->next_to_watch = i;
3411
3412                         len -= size;
3413                         offset += size;
3414                         count++;
3415                         i++;
3416                         if (i == tx_ring->count)
3417                                 i = 0;
3418                 }
3419         }
3420         if (i == 0)
3421                 i = tx_ring->count - 1;
3422         else
3423                 i = i - 1;
3424         tx_ring->tx_buffer_info[i].skb = skb;
3425         tx_ring->tx_buffer_info[first].next_to_watch = i;
3426
3427         return count;
3428 }
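
/*
 * Sketch of the per-buffer descriptor cost behind the mapping loops
 * above (illustrative): a buffer is consumed in chunks of at most
 * IXGBE_MAX_DATA_PER_TXD bytes, so its descriptor cost is its length
 * divided by that limit, rounded up.
 */
static inline unsigned int ixgbe_example_desc_cost(unsigned int len)
{
        return DIV_ROUND_UP(len, IXGBE_MAX_DATA_PER_TXD);
}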
3429
3430 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3431                                struct ixgbe_ring *tx_ring,
3432                                int tx_flags, int count, u32 paylen, u8 hdr_len)
3433 {
3434         union ixgbe_adv_tx_desc *tx_desc = NULL;
3435         struct ixgbe_tx_buffer *tx_buffer_info;
3436         u32 olinfo_status = 0, cmd_type_len = 0;
3437         unsigned int i;
3438         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3439
3440         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3441
3442         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3443
3444         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3445                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3446
3447         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3448                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3449
3450                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3451                                                 IXGBE_ADVTXD_POPTS_SHIFT;
3452
3453                 /* use index 1 context for tso */
3454                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3455                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3456                         olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3457                                                 IXGBE_ADVTXD_POPTS_SHIFT;
3458
3459         } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3460                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3461                                                 IXGBE_ADVTXD_POPTS_SHIFT;
3462
3463         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3464
3465         i = tx_ring->next_to_use;
3466         while (count--) {
3467                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3468                 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3469                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3470                 tx_desc->read.cmd_type_len =
3471                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3472                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3473
3474                 i++;
3475                 if (i == tx_ring->count)
3476                         i = 0;
3477         }
3478
3479         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3480
3481         /*
3482          * Force memory writes to complete before letting h/w
3483          * know there are new descriptors to fetch.  (Only
3484          * applicable for weak-ordered memory model archs,
3485          * such as IA-64).
3486          */
3487         wmb();
3488
3489         tx_ring->next_to_use = i;
3490         writel(i, adapter->hw.hw_addr + tx_ring->tail);
3491 }
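
/*
 * Sketch of the end-of-frame flagging above (illustrative): txd_cmd
 * (EOP | RS | IFCS) is OR'd only into the cmd_type_len of the last data
 * descriptor, so the hardware reports status once per frame rather than
 * once per descriptor.
 */
static inline __le32 ixgbe_example_finish_desc(__le32 cmd_type_len)
{
        return cmd_type_len | cpu_to_le32(IXGBE_TXD_CMD_EOP |
                                          IXGBE_TXD_CMD_RS |
                                          IXGBE_TXD_CMD_IFCS);
}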
3492
3493 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3494                                  struct ixgbe_ring *tx_ring, int size)
3495 {
3496         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3497
3498         netif_stop_subqueue(netdev, tx_ring->queue_index);
3499         /* Herbert's original patch had:
3500          *  smp_mb__after_netif_stop_queue();
3501          * but since that doesn't exist yet, just open code it. */
3502         smp_mb();
3503
3504         /* We need to check again in a case another CPU has just
3505          * made room available. */
3506         if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3507                 return -EBUSY;
3508
3509         /* A reprieve! - use start_queue because it doesn't call schedule */
3510         netif_start_subqueue(netdev, tx_ring->queue_index);
3511         ++adapter->restart_queue;
3512         return 0;
3513 }
3514
3515 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3516                                struct ixgbe_ring *tx_ring, int size)
3517 {
3518         if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3519                 return 0;
3520         return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3521 }
3522
3524 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3525 {
3526         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3527         struct ixgbe_ring *tx_ring;
3528         unsigned int len = skb->len;
3529         unsigned int first;
3530         unsigned int tx_flags = 0;
3531         u8 hdr_len = 0;
3532         int r_idx = 0, tso;
3533         unsigned int mss = 0;
3534         int count = 0;
3535         unsigned int f;
3536         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3537         len -= skb->data_len;
3538         r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3539         tx_ring = &adapter->tx_ring[r_idx];
3540
3542         if (skb->len <= 0) {
3543                 dev_kfree_skb(skb);
3544                 return NETDEV_TX_OK;
3545         }
3546         mss = skb_shinfo(skb)->gso_size;
3547
3548         if (mss)
3549                 count++;
3550         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3551                 count++;
3552
3553         count += TXD_USE_COUNT(len);
3554         for (f = 0; f < nr_frags; f++)
3555                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3556
3557         if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
3558                 adapter->tx_busy++;
3559                 return NETDEV_TX_BUSY;
3560         }
3561         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3562                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3563                 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3564         }
3565
3566         if (skb->protocol == htons(ETH_P_IP))
3567                 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3568         first = tx_ring->next_to_use;
3569         tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3570         if (tso < 0) {
3571                 dev_kfree_skb_any(skb);
3572                 return NETDEV_TX_OK;
3573         }
3574
3575         if (tso)
3576                 tx_flags |= IXGBE_TX_FLAGS_TSO;
3577         else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3578                  (skb->ip_summed == CHECKSUM_PARTIAL))
3579                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3580
3581         ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3582                            ixgbe_tx_map(adapter, tx_ring, skb, first),
3583                            skb->len, hdr_len);
3584
3585         netdev->trans_start = jiffies;
3586
3587         ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3588
3589         return NETDEV_TX_OK;
3590 }
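
/*
 * Sketch of the descriptor budgeting in ixgbe_xmit_frame() above
 * (illustrative): one context descriptor when TSO or checksum offload is
 * in play, plus the data-descriptor cost of the linear area and of every
 * page fragment.
 */
static unsigned int ixgbe_example_skb_desc_cost(struct sk_buff *skb)
{
        unsigned int count = 0, f;

        if (skb_is_gso(skb) || skb->ip_summed == CHECKSUM_PARTIAL)
                count++;                /* context descriptor */
        count += TXD_USE_COUNT(skb_headlen(skb));
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
        return count;
}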
3591
3592 /**
3593  * ixgbe_get_stats - Get System Network Statistics
3594  * @netdev: network interface device structure
3595  *
3596  * Returns the address of the device statistics structure.
3597  * The statistics are actually updated from the timer callback.
3598  **/
3599 static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3600 {
3601         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3602
3603         /* only return the current stats */
3604         return &adapter->net_stats;
3605 }
3606
3607 /**
3608  * ixgbe_set_mac - Change the Ethernet Address of the NIC
3609  * @netdev: network interface device structure
3610  * @p: pointer to an address structure
3611  *
3612  * Returns 0 on success, negative on failure
3613  **/
3614 static int ixgbe_set_mac(struct net_device *netdev, void *p)
3615 {
3616         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3617         struct sockaddr *addr = p;
3618
3619         if (!is_valid_ether_addr(addr->sa_data))
3620                 return -EADDRNOTAVAIL;
3621
3622         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3623         memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3624
3625         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3626
3627         return 0;
3628 }
3629
3630 #ifdef CONFIG_NET_POLL_CONTROLLER
3631 /*
3632  * Polling 'interrupt' - used by things like netconsole to send skbs
3633  * without having to re-enable interrupts. It's not called while
3634  * the interrupt routine is executing.
3635  */
3636 static void ixgbe_netpoll(struct net_device *netdev)
3637 {
3638         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3639
3640         disable_irq(adapter->pdev->irq);
3641         adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
3642         ixgbe_intr(adapter->pdev->irq, netdev);
3643         adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
3644         enable_irq(adapter->pdev->irq);
3645 }
3646 #endif
3647
3648 /**
3649  * ixgbe_napi_add_all - prep napi structs for use
3650  * @adapter: private struct
3651  * helper function to napi_add each possible q_vector->napi
3652  */
3653 static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3654 {
3655         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3656         int (*poll)(struct napi_struct *, int);
3657
3658         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3659                 poll = &ixgbe_clean_rxonly;
3660         } else {
3661                 poll = &ixgbe_poll;
3662                 /* only one q_vector for legacy modes */
3663                 q_vectors = 1;
3664         }
3665
3666         for (i = 0; i < q_vectors; i++) {
3667                 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3668                 netif_napi_add(adapter->netdev, &q_vector->napi,
3669                                (*poll), 64);
3670         }
3671 }
3672
3673 /**
3674  * ixgbe_probe - Device Initialization Routine
3675  * @pdev: PCI device information struct
3676  * @ent: entry in ixgbe_pci_tbl
3677  *
3678  * Returns 0 on success, negative on failure
3679  *
3680  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
3681  * The OS initialization, configuring of the adapter private structure,
3682  * and a hardware reset occur.
3683  **/
3684 static int __devinit ixgbe_probe(struct pci_dev *pdev,
3685                                  const struct pci_device_id *ent)
3686 {
3687         struct net_device *netdev;
3688         struct ixgbe_adapter *adapter = NULL;
3689         struct ixgbe_hw *hw;
3690         const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3691         static int cards_found;
3692         int i, err, pci_using_dac;
3693         u16 link_status, link_speed, link_width;
3694         u32 part_num;
3695
3696         err = pci_enable_device(pdev);
3697         if (err)
3698                 return err;
3699
3700         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
3701             !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
3702                 pci_using_dac = 1;
3703         } else {
3704                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3705                 if (err) {
3706                         err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3707                         if (err) {
3708                                 dev_err(&pdev->dev, "No usable DMA "
3709                                         "configuration, aborting\n");
3710                                 goto err_dma;
3711                         }
3712                 }
3713                 pci_using_dac = 0;
3714         }
3715
3716         err = pci_request_regions(pdev, ixgbe_driver_name);
3717         if (err) {
3718                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3719                 goto err_pci_reg;
3720         }
3721
3722         pci_set_master(pdev);
3723         pci_save_state(pdev);
3724
3725         netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
3726         if (!netdev) {
3727                 err = -ENOMEM;
3728                 goto err_alloc_etherdev;
3729         }
3730
3731         SET_NETDEV_DEV(netdev, &pdev->dev);
3732
3733         pci_set_drvdata(pdev, netdev);
3734         adapter = netdev_priv(netdev);
3735
3736         adapter->netdev = netdev;
3737         adapter->pdev = pdev;
3738         hw = &adapter->hw;
3739         hw->back = adapter;
3740         adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3741
3742         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3743                               pci_resource_len(pdev, 0));
3744         if (!hw->hw_addr) {
3745                 err = -EIO;
3746                 goto err_ioremap;
3747         }
3748
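        /* Note: BARs 1-5 are not mapped by this driver; the loop below
         * merely walks them and intentionally does nothing. */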
3749         for (i = 1; i <= 5; i++) {
3750                 if (pci_resource_len(pdev, i) == 0)
3751                         continue;
3752         }
3753
3754         netdev->open = &ixgbe_open;
3755         netdev->stop = &ixgbe_close;
3756         netdev->hard_start_xmit = &ixgbe_xmit_frame;
3757         netdev->get_stats = &ixgbe_get_stats;
3758         netdev->set_rx_mode = &ixgbe_set_rx_mode;
3759         netdev->set_multicast_list = &ixgbe_set_rx_mode;
3760         netdev->set_mac_address = &ixgbe_set_mac;
3761         netdev->change_mtu = &ixgbe_change_mtu;
3762         ixgbe_set_ethtool_ops(netdev);
3763         netdev->tx_timeout = &ixgbe_tx_timeout;
3764         netdev->watchdog_timeo = 5 * HZ;
3765         netdev->vlan_rx_register = ixgbe_vlan_rx_register;
3766         netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
3767         netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
3768 #ifdef CONFIG_NET_POLL_CONTROLLER
3769         netdev->poll_controller = ixgbe_netpoll;
3770 #endif
3771         strcpy(netdev->name, pci_name(pdev));
3772
3773         adapter->bd_number = cards_found;
3774
3775         /* PCI config space info */
3776         hw->vendor_id = pdev->vendor;
3777         hw->device_id = pdev->device;
3778         hw->revision_id = pdev->revision;
3779         hw->subsystem_vendor_id = pdev->subsystem_vendor;
3780         hw->subsystem_device_id = pdev->subsystem_device;
3781
3782         /* Setup hw api */
3783         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3784         hw->mac.type  = ii->mac;
3785
3786         err = ii->get_invariants(hw);
3787         if (err)
3788                 goto err_hw_init;
3789
3790         /* setup the private structure */
3791         err = ixgbe_sw_init(adapter);
3792         if (err)
3793                 goto err_sw_init;
3794
3795         netdev->features = NETIF_F_SG |
3796                            NETIF_F_IP_CSUM |
3797                            NETIF_F_HW_VLAN_TX |
3798                            NETIF_F_HW_VLAN_RX |
3799                            NETIF_F_HW_VLAN_FILTER;
3800
3801         netdev->features |= NETIF_F_IPV6_CSUM;
3802         netdev->features |= NETIF_F_TSO;
3803         netdev->features |= NETIF_F_TSO6;
3804         netdev->features |= NETIF_F_LRO;
3805
3806         netdev->vlan_features |= NETIF_F_TSO;
3807         netdev->vlan_features |= NETIF_F_TSO6;
3808         netdev->vlan_features |= NETIF_F_IP_CSUM;
3809         netdev->vlan_features |= NETIF_F_SG;
3810
3811         if (pci_using_dac)
3812                 netdev->features |= NETIF_F_HIGHDMA;
3813
3814         /* make sure the EEPROM is good */
3815         if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
3816                 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3817                 err = -EIO;
3818                 goto err_eeprom;
3819         }
3820
3821         memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3822         memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3823
3824         if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
3825                 err = -EIO;
3826                 goto err_eeprom;
3827         }
3828
3829         init_timer(&adapter->watchdog_timer);
3830         adapter->watchdog_timer.function = &ixgbe_watchdog;
3831         adapter->watchdog_timer.data = (unsigned long)adapter;
3832
3833         INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
3834         INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
3835
3836         err = ixgbe_init_interrupt_scheme(adapter);
3837         if (err)
3838                 goto err_sw_init;
3839
3840         /* print bus type/speed/width info */
3841         pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
3842         link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3843         link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3844         dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3845                  "%02x:%02x:%02x:%02x:%02x:%02x\n",
3846                 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3847                  (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3848                  "Unknown"),
3849                 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3850                  (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3851                  (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3852                  (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3853                  "Unknown"),
3854                 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3855                 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3856         ixgbe_read_part_num(hw, &part_num);
3857         dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3858                  hw->mac.type, hw->phy.type,
3859                  (part_num >> 8), (part_num & 0xff));
3860
3861         if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3862                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3863                          "this card is not sufficient for optimal "
3864                          "performance.\n");
3865                 dev_warn(&pdev->dev, "For optimal performance a x8 "
3866                          "PCI-Express slot is required.\n");
3867         }
3868
3869         /* reset the hardware with the new settings */
3870         ixgbe_start_hw(hw);
3871
3872         netif_carrier_off(netdev);
3873         netif_tx_stop_all_queues(netdev);
3874
3875         ixgbe_napi_add_all(adapter);
3876
3877         strcpy(netdev->name, "eth%d");
3878         err = register_netdev(netdev);
3879         if (err)
3880                 goto err_register;
3881
3882 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3883         if (dca_add_requester(&pdev->dev) == 0) {
3884                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3885                 /* always use CB2 mode, difference is masked
3886                  * in the CB driver */
3887                 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
3888                 ixgbe_setup_dca(adapter);
3889         }
3890 #endif
3891
3892         dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
3893         cards_found++;
3894         return 0;
3895
3896 err_register:
3897         ixgbe_release_hw_control(adapter);
3898 err_hw_init:
3899 err_sw_init:
3900         ixgbe_reset_interrupt_capability(adapter);
3901 err_eeprom:
3902         iounmap(hw->hw_addr);
3903 err_ioremap:
3904         free_netdev(netdev);
3905 err_alloc_etherdev:
3906         pci_release_regions(pdev);
3907 err_pci_reg:
3908 err_dma:
3909         pci_disable_device(pdev);
3910         return err;
3911 }
3912
3913 /**
3914  * ixgbe_remove - Device Removal Routine
3915  * @pdev: PCI device information struct
3916  *
3917  * ixgbe_remove is called by the PCI subsystem to alert the driver
3918  * that it should release a PCI device.  This could be caused by a
3919  * Hot-Plug event, or because the driver is going to be removed from
3920  * memory.
3921  **/
3922 static void __devexit ixgbe_remove(struct pci_dev *pdev)
3923 {
3924         struct net_device *netdev = pci_get_drvdata(pdev);
3925         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3926
3927         set_bit(__IXGBE_DOWN, &adapter->state);
3928         del_timer_sync(&adapter->watchdog_timer);
3929
3930         flush_scheduled_work();
3931
3932 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3933         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3934                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3935                 dca_remove_requester(&pdev->dev);
3936                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
3937         }
3938
3939 #endif
3940         unregister_netdev(netdev);
3941
3942         ixgbe_reset_interrupt_capability(adapter);
3943
3944         ixgbe_release_hw_control(adapter);
3945
3946         iounmap(adapter->hw.hw_addr);
3947         pci_release_regions(pdev);
3948
3949         DPRINTK(PROBE, INFO, "complete\n");
3950         kfree(adapter->tx_ring);
3951         kfree(adapter->rx_ring);
3952
3953         free_netdev(netdev);
3954
3955         pci_disable_device(pdev);
3956 }
3957
3958 /**
3959  * ixgbe_io_error_detected - called when PCI error is detected
3960  * @pdev: Pointer to PCI device
3961  * @state: The current pci connection state
3962  *
3963  * This function is called after a PCI bus error affecting
3964  * this device has been detected.
3965  */
3966 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3967                                                 pci_channel_state_t state)
3968 {
3969         struct net_device *netdev = pci_get_drvdata(pdev);
3970         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3971
3972         netif_device_detach(netdev);
3973
3974         if (netif_running(netdev))
3975                 ixgbe_down(adapter);
3976         pci_disable_device(pdev);
3977
3978         /* Request a slot reset. */
3979         return PCI_ERS_RESULT_NEED_RESET;
3980 }
3981
3982 /**
3983  * ixgbe_io_slot_reset - called after the pci bus has been reset.
3984  * @pdev: Pointer to PCI device
3985  *
3986  * Restart the card from scratch, as if from a cold-boot.
3987  */
3988 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3989 {
3990         struct net_device *netdev = pci_get_drvdata(pdev);
3991         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3992
3993         if (pci_enable_device(pdev)) {
3994                 DPRINTK(PROBE, ERR,
3995                         "Cannot re-enable PCI device after reset.\n");
3996                 return PCI_ERS_RESULT_DISCONNECT;
3997         }
3998         pci_set_master(pdev);
3999         pci_restore_state(pdev);
4000
4001         pci_enable_wake(pdev, PCI_D3hot, 0);
4002         pci_enable_wake(pdev, PCI_D3cold, 0);
4003
4004         ixgbe_reset(adapter);
4005
4006         return PCI_ERS_RESULT_RECOVERED;
4007 }
4008
4009 /**
4010  * ixgbe_io_resume - called when traffic can start flowing again.
4011  * @pdev: Pointer to PCI device
4012  *
4013  * This callback is called when the error recovery driver tells us that
4014  * it's OK to resume normal operation.
4015  */
4016 static void ixgbe_io_resume(struct pci_dev *pdev)
4017 {
4018         struct net_device *netdev = pci_get_drvdata(pdev);
4019         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4020
4021         if (netif_running(netdev)) {
4022                 if (ixgbe_up(adapter)) {
4023                         DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
4024                         return;
4025                 }
4026         }
4027
4028         netif_device_attach(netdev);
4030 }
4031
4032 static struct pci_error_handlers ixgbe_err_handler = {
4033         .error_detected = ixgbe_io_error_detected,
4034         .slot_reset = ixgbe_io_slot_reset,
4035         .resume = ixgbe_io_resume,
4036 };
4037
4038 static struct pci_driver ixgbe_driver = {
4039         .name     = ixgbe_driver_name,
4040         .id_table = ixgbe_pci_tbl,
4041         .probe    = ixgbe_probe,
4042         .remove   = __devexit_p(ixgbe_remove),
4043 #ifdef CONFIG_PM
4044         .suspend  = ixgbe_suspend,
4045         .resume   = ixgbe_resume,
4046 #endif
4047         .shutdown = ixgbe_shutdown,
4048         .err_handler = &ixgbe_err_handler
4049 };
4050
4051 /**
4052  * ixgbe_init_module - Driver Registration Routine
4053  *
4054  * ixgbe_init_module is the first routine called when the driver is
4055  * loaded. All it does is register with the PCI subsystem.
4056  **/
4057 static int __init ixgbe_init_module(void)
4058 {
4059         int ret;
4060         printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
4061                ixgbe_driver_string, ixgbe_driver_version);
4062
4063         printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
4064
4065 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4066         dca_register_notify(&dca_notifier);
4067
4068 #endif
4069         ret = pci_register_driver(&ixgbe_driver);
4070         return ret;
4071 }
4072 module_init(ixgbe_init_module);
4073
4074 /**
4075  * ixgbe_exit_module - Driver Exit Cleanup Routine
4076  *
4077  * ixgbe_exit_module is called just before the driver is removed
4078  * from memory.
4079  **/
4080 static void __exit ixgbe_exit_module(void)
4081 {
4082 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4083         dca_unregister_notify(&dca_notifier);
4084 #endif
4085         pci_unregister_driver(&ixgbe_driver);
4086 }
4087
4088 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4089 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
4090                             void *p)
4091 {
4092         int ret_val;
4093
4094         ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
4095                                          __ixgbe_notify_dca);
4096
4097         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4098 }
4099 #endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
4100
4101 module_exit(ixgbe_exit_module);
4102
4103 /* ixgbe_main.c */