igb: make tx hang check multiqueue, check eop descriptor
drivers/net/igb/igb_main.c
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp = 0;
        int shift = 0;

        stamp |= (u64)rd32(E1000_SYSTIML) << shift;
        stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
}
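/* Note: shift is 0 here, so SYSTIML supplies bits 31:0 and SYSTIMH bits
 * 63:32 of the raw 64-bit cycle count; e.g. SYSTIML = 0x00001000 and
 * SYSTIMH = 0x00000002 yield stamp = 0x0000000200001000.  SYSTIML is
 * read first; the hardware is presumed to latch SYSTIMH on that read so
 * the pair cannot tear when the low word rolls over.
 */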

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
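/* Worked example of the swizzle above:
 *   Q_IDX_82576(0) = 0,  Q_IDX_82576(1) = 8,
 *   Q_IDX_82576(2) = 1,  Q_IDX_82576(3) = 9, ...
 * i.e. even logical queues land on register indices 0..7 and odd ones
 * on 8..15, matching the VF queue pairing described in
 * igb_cache_ring_register() below.
 */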
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = rbase_offset +
                                                      Q_IDX_82576(i);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = rbase_offset +
                                                      Q_IDX_82576(i);
                break;
        case e1000_82575:
        default:
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = i;
                break;
        }
}

static void igb_free_queues(struct igb_adapter *adapter)
{
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        adapter->tx_ring = NULL;
        adapter->rx_ring = NULL;

        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
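                /* Sketch of the IVAR0 entry layout this code assumes
                 * (one 32-bit entry per index, four queue slots each):
                 *   bits  7:0  rx queue 'index'    bits 15:8  tx queue 'index'
                 *   bits 23:16 rx queue 'index+8'  bits 31:24 tx queue 'index+8'
                 * Each byte holds the MSI-X vector number ORed with
                 * E1000_IVAR_VALID.
                 */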
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support */
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                                      E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                igb_assign_vector(q_vector, vector++);
                adapter->eims_enable_mask |= q_vector->eims_value;
        }

        wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          &igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
        }
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
        adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;
        /* if tx handler is separate add 1 for every tx queue */
        numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
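        /* illustrative count: with 4 online CPUs (and at least 4 rx/tx
         * queues supported), numvecs = 4 rx + 4 tx + 1 other = 9 MSI-X
         * vectors requested below */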
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->vfs_allocated_count = 0;
        adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                q_vector->set_itr = 1;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        while (v_idx) {
                v_idx--;
                q_vector = adapter->q_vector[v_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[v_idx] = NULL;
        }
        return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->rx_ring = &adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->tx_ring = &adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}
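/* Example of the fallback pairing above: with 4 rx queues, 4 tx queues
 * and only 4 q_vectors, tx ring i and rx ring i share q_vector i, so
 * each vector services an rx/tx pair rather than a single ring.
 */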

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }


        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                switch (hw->mac.type) {
                case e1000_82575:
                        wr32(E1000_MSIXBM(0),
                             (E1000_EICR_RX_QUEUE0 |
                              E1000_EICR_TX_QUEUE0 |
                              E1000_EIMS_OTHER));
                        break;
                case e1000_82576:
                        wr32(E1000_IVAR0, E1000_IVAR_VALID);
                        break;
                default:
                        break;
                }
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count)
                        wr32(E1000_MBVFIMR, 0xFF);
                wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
                                 E1000_IMS_DOUTSYNC));
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK);
                wr32(E1000_IAM, IMS_ENABLE_MASK);
        }
}
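/* As understood from the EIAC/EIAM descriptions: EIAC marks vectors for
 * automatic clearing and EIAM for automatic masking on interrupt
 * assertion, so queue vectors are acked/masked by hardware without an
 * extra register write in the hot path; see the 82575/82576 datasheets
 * for the authoritative semantics.
 */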

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                /* add VID to filter table */
                igb_vfta_set(hw, vid, true);
                adapter->mng_vlan_id = vid;
        } else {
                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        }

        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !vlan_group_get_device(adapter->vlgrp, old_vid)) {
                /* remove VID from filter table */
                igb_vfta_set(hw, old_vid, false);
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
                        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
                        ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
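        /* e.g. a freshly initialized 256-entry ring with next_to_use ==
         * next_to_clean has igb_desc_unused() == 255, so one slot always
         * stays empty as the full-vs-empty disambiguator */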


        adapter->tx_queue_len = netdev->tx_queue_len;
}


/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/

int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* Fire a link change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);
        return 0;
}

void igb_down(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);

        /* record the stats before reset */
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

        /* since we reset the hardware, DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}
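/* __IGB_RESETTING acts as a mutex-by-bit: any other reinit path (e.g.
 * ethtool set operations) is expected to spin in the same
 * test_and_set_bit() loop, so only one down/up cycle runs at a time.
 */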

void igb_reset(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
         */
        switch (mac->type) {
        case e1000_82576:
                pba = rd32(E1000_RXPBS);
                pba &= E1000_RXPBS_SIZE_MASK_82576;
                break;
        case e1000_82575:
        default:
                pba = E1000_PBA_34K;
                break;
        }

        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
            (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(union e1000_adv_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = adapter->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
                wr32(E1000_PBA, pba);
        }

        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
                        ((pba << 10) - 2 * adapter->max_frame_size));

        if (mac->type < e1000_82576) {
                fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
                fc->low_water = fc->high_water - 8;
        } else {
                fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
                fc->low_water = fc->high_water - 16;
        }
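        /* illustrative numbers, assuming pba = 34 (KB) and a 1518-byte
         * max frame: hwm = min(34816 * 9 / 10, 34816 - 2 * 1518)
         *                = min(31334, 31780) = 31334,
         * so on 82575 fc->high_water = 31334 & 0xFFF8 = 31328 and
         * fc->low_water = 31320 */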
1245         fc->pause_time = 0xFFFF;
1246         fc->send_xon = 1;
1247         fc->current_mode = fc->requested_mode;
1248
1249         /* disable receive for all VFs and wait one second */
1250         if (adapter->vfs_allocated_count) {
1251                 int i;
1252                 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1253                         adapter->vf_data[i].flags = 0;
1254
1255                 /* ping all the active vfs to let them know we are going down */
1256                 igb_ping_all_vfs(adapter);
1257
1258                 /* disable transmits and receives */
1259                 wr32(E1000_VFRE, 0);
1260                 wr32(E1000_VFTE, 0);
1261         }
1262
1263         /* Allow time for pending master requests to run */
1264         adapter->hw.mac.ops.reset_hw(&adapter->hw);
1265         wr32(E1000_WUC, 0);
1266
1267         if (adapter->hw.mac.ops.init_hw(&adapter->hw))
1268                 dev_err(&adapter->pdev->dev, "Hardware Error\n");
1269
1270         igb_update_mng_vlan(adapter);
1271
1272         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1273         wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1274
1275         igb_reset_adaptive(&adapter->hw);
1276         igb_get_phy_info(&adapter->hw);
1277 }
1278
1279 static const struct net_device_ops igb_netdev_ops = {
1280         .ndo_open               = igb_open,
1281         .ndo_stop               = igb_close,
1282         .ndo_start_xmit         = igb_xmit_frame_adv,
1283         .ndo_get_stats          = igb_get_stats,
1284         .ndo_set_rx_mode        = igb_set_rx_mode,
1285         .ndo_set_multicast_list = igb_set_rx_mode,
1286         .ndo_set_mac_address    = igb_set_mac,
1287         .ndo_change_mtu         = igb_change_mtu,
1288         .ndo_do_ioctl           = igb_ioctl,
1289         .ndo_tx_timeout         = igb_tx_timeout,
1290         .ndo_validate_addr      = eth_validate_addr,
1291         .ndo_vlan_rx_register   = igb_vlan_rx_register,
1292         .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
1293         .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
1294 #ifdef CONFIG_NET_POLL_CONTROLLER
1295         .ndo_poll_controller    = igb_netpoll,
1296 #endif
1297 };
1298
1299 /**
1300  * igb_probe - Device Initialization Routine
1301  * @pdev: PCI device information struct
1302  * @ent: entry in igb_pci_tbl
1303  *
1304  * Returns 0 on success, negative on failure
1305  *
1306  * igb_probe initializes an adapter identified by a pci_dev structure.
1307  * The OS initialization, configuring of the adapter private structure,
1308  * and a hardware reset occur.
1309  **/
1310 static int __devinit igb_probe(struct pci_dev *pdev,
1311                                const struct pci_device_id *ent)
1312 {
1313         struct net_device *netdev;
1314         struct igb_adapter *adapter;
1315         struct e1000_hw *hw;
1316         u16 eeprom_data = 0;
1317         static int global_quad_port_a; /* global quad port a indication */
1318         const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1319         unsigned long mmio_start, mmio_len;
1320         int err, pci_using_dac;
1321         u16 eeprom_apme_mask = IGB_EEPROM_APME;
1322         u32 part_num;
1323
1324         err = pci_enable_device_mem(pdev);
1325         if (err)
1326                 return err;
1327
1328         pci_using_dac = 0;
1329         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1330         if (!err) {
1331                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1332                 if (!err)
1333                         pci_using_dac = 1;
1334         } else {
1335                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1336                 if (err) {
1337                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1338                         if (err) {
1339                                 dev_err(&pdev->dev, "No usable DMA "
1340                                         "configuration, aborting\n");
1341                                 goto err_dma;
1342                         }
1343                 }
1344         }
1345
1346         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1347                                            IORESOURCE_MEM),
1348                                            igb_driver_name);
1349         if (err)
1350                 goto err_pci_reg;
1351
1352         pci_enable_pcie_error_reporting(pdev);
1353
1354         pci_set_master(pdev);
1355         pci_save_state(pdev);
1356
1357         err = -ENOMEM;
1358         netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1359                                    IGB_ABS_MAX_TX_QUEUES);
1360         if (!netdev)
1361                 goto err_alloc_etherdev;
1362
1363         SET_NETDEV_DEV(netdev, &pdev->dev);
1364
1365         pci_set_drvdata(pdev, netdev);
1366         adapter = netdev_priv(netdev);
1367         adapter->netdev = netdev;
1368         adapter->pdev = pdev;
1369         hw = &adapter->hw;
1370         hw->back = adapter;
1371         adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1372
1373         mmio_start = pci_resource_start(pdev, 0);
1374         mmio_len = pci_resource_len(pdev, 0);
1375
1376         err = -EIO;
1377         hw->hw_addr = ioremap(mmio_start, mmio_len);
1378         if (!hw->hw_addr)
1379                 goto err_ioremap;
1380
1381         netdev->netdev_ops = &igb_netdev_ops;
1382         igb_set_ethtool_ops(netdev);
1383         netdev->watchdog_timeo = 5 * HZ;
1384
1385         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1386
1387         netdev->mem_start = mmio_start;
1388         netdev->mem_end = mmio_start + mmio_len;
1389
1390         /* PCI config space info */
1391         hw->vendor_id = pdev->vendor;
1392         hw->device_id = pdev->device;
1393         hw->revision_id = pdev->revision;
1394         hw->subsystem_vendor_id = pdev->subsystem_vendor;
1395         hw->subsystem_device_id = pdev->subsystem_device;
1396
1397         /* setup the private structure */
1398         hw->back = adapter;
1399         /* Copy the default MAC, PHY and NVM function pointers */
1400         memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1401         memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1402         memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1403         /* Initialize skew-specific constants */
1404         err = ei->get_invariants(hw);
1405         if (err)
1406                 goto err_sw_init;
1407
1408         /* setup the private structure */
1409         err = igb_sw_init(adapter);
1410         if (err)
1411                 goto err_sw_init;
1412
1413         igb_get_bus_info_pcie(hw);
1414
1415         hw->phy.autoneg_wait_to_complete = false;
1416         hw->mac.adaptive_ifs = true;
1417
1418         /* Copper options */
1419         if (hw->phy.media_type == e1000_media_type_copper) {
1420                 hw->phy.mdix = AUTO_ALL_MODES;
1421                 hw->phy.disable_polarity_correction = false;
1422                 hw->phy.ms_type = e1000_ms_hw_default;
1423         }
1424
1425         if (igb_check_reset_block(hw))
1426                 dev_info(&pdev->dev,
1427                         "PHY reset is blocked due to SOL/IDER session.\n");
1428
1429         netdev->features = NETIF_F_SG |
1430                            NETIF_F_IP_CSUM |
1431                            NETIF_F_HW_VLAN_TX |
1432                            NETIF_F_HW_VLAN_RX |
1433                            NETIF_F_HW_VLAN_FILTER;
1434
1435         netdev->features |= NETIF_F_IPV6_CSUM;
1436         netdev->features |= NETIF_F_TSO;
1437         netdev->features |= NETIF_F_TSO6;
1438
1439         netdev->features |= NETIF_F_GRO;
1440
1441         netdev->vlan_features |= NETIF_F_TSO;
1442         netdev->vlan_features |= NETIF_F_TSO6;
1443         netdev->vlan_features |= NETIF_F_IP_CSUM;
1444         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
1445         netdev->vlan_features |= NETIF_F_SG;
1446
1447         if (pci_using_dac)
1448                 netdev->features |= NETIF_F_HIGHDMA;
1449
1450         if (adapter->hw.mac.type == e1000_82576)
1451                 netdev->features |= NETIF_F_SCTP_CSUM;
1452
1453         adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1454
1455         /* before reading the NVM, reset the controller to put the device in a
1456          * known good starting state */
1457         hw->mac.ops.reset_hw(hw);
1458
1459         /* make sure the NVM is good */
1460         if (igb_validate_nvm_checksum(hw) < 0) {
1461                 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1462                 err = -EIO;
1463                 goto err_eeprom;
1464         }
1465
1466         /* copy the MAC address out of the NVM */
1467         if (hw->mac.ops.read_mac_addr(hw))
1468                 dev_err(&pdev->dev, "NVM Read Error\n");
1469
1470         memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1471         memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1472
1473         if (!is_valid_ether_addr(netdev->perm_addr)) {
1474                 dev_err(&pdev->dev, "Invalid MAC Address\n");
1475                 err = -EIO;
1476                 goto err_eeprom;
1477         }
1478
1479         setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1480                     (unsigned long) adapter);
1481         setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1482                     (unsigned long) adapter);
1483
1484         INIT_WORK(&adapter->reset_task, igb_reset_task);
1485         INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1486
1487         /* Initialize link properties that are user-changeable */
1488         adapter->fc_autoneg = true;
1489         hw->mac.autoneg = true;
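             /* 0x2f advertises every supported speed/duplex combination:
              * 10/100 half and full duplex plus 1000 full duplex */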
1490         hw->phy.autoneg_advertised = 0x2f;
1491
1492         hw->fc.requested_mode = e1000_fc_default;
1493         hw->fc.current_mode = e1000_fc_default;
1494
1495         igb_validate_mdi_setting(hw);
1496
1497         /* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
1498          * enable the ACPI Magic Packet filter
1499          */
1500
1501         if (hw->bus.func == 0)
1502                 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1503         else if (hw->bus.func == 1)
1504                 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1505
1506         if (eeprom_data & eeprom_apme_mask)
1507                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1508
1509         /* now that we have the eeprom settings, apply the special cases where
1510          * the eeprom may be wrong or the board simply won't support wake on
1511          * lan on a particular port */
1512         switch (pdev->device) {
1513         case E1000_DEV_ID_82575GB_QUAD_COPPER:
1514                 adapter->eeprom_wol = 0;
1515                 break;
1516         case E1000_DEV_ID_82575EB_FIBER_SERDES:
1517         case E1000_DEV_ID_82576_FIBER:
1518         case E1000_DEV_ID_82576_SERDES:
1519                 /* Wake events only supported on port A for dual fiber
1520                  * regardless of eeprom setting */
1521                 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1522                         adapter->eeprom_wol = 0;
1523                 break;
1524         case E1000_DEV_ID_82576_QUAD_COPPER:
1525                 /* if quad port adapter, disable WoL on all but port A */
1526                 if (global_quad_port_a != 0)
1527                         adapter->eeprom_wol = 0;
1528                 else
1529                         adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1530                 /* Reset for multiple quad port adapters */
1531                 if (++global_quad_port_a == 4)
1532                         global_quad_port_a = 0;
1533                 break;
1534         }
1535
1536         /* initialize the wol settings based on the eeprom settings */
1537         adapter->wol = adapter->eeprom_wol;
1538         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1539
1540         /* reset the hardware with the new settings */
1541         igb_reset(adapter);
1542
1543         /* let the f/w know that the h/w is now under the control of the
1544          * driver. */
1545         igb_get_hw_control(adapter);
1546
1547         strcpy(netdev->name, "eth%d");
1548         err = register_netdev(netdev);
1549         if (err)
1550                 goto err_register;
1551
1552         /* carrier off reporting is important to ethtool even BEFORE open */
1553         netif_carrier_off(netdev);
1554
1555 #ifdef CONFIG_IGB_DCA
1556         if (dca_add_requester(&pdev->dev) == 0) {
1557                 adapter->flags |= IGB_FLAG_DCA_ENABLED;
1558                 dev_info(&pdev->dev, "DCA enabled\n");
1559                 igb_setup_dca(adapter);
1560         }
1561
1562 #endif
1563
1564         switch (hw->mac.type) {
1565         case e1000_82576:
1566                 /*
1567                  * Initialize hardware timer: we keep it running just in
1568                  * case some program needs it later on.
1569                  */
1570                 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1571                 adapter->cycles.read = igb_read_clock;
1572                 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1573                 adapter->cycles.mult = 1;
1574                 /*
1575                  * Scale the NIC clock cycle by a large factor so that
1576                  * relatively small clock corrections can be added or
1577                  * subtracted at each clock tick. The drawbacks of a large
1578                  * factor are a) that the clock register overflows more quickly
1579                  * (not such a big deal) and b) that the increment per tick has
1580                  * to fit into 24 bits.  As a result we need to use a shift of
1581                  * 19 so we can fit a value of 16 into the TIMINCA register.
1582                  */
1583                 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1584                 wr32(E1000_TIMINCA,
1585                                 (1 << E1000_TIMINCA_16NS_SHIFT) |
1586                                 (16 << IGB_82576_TSYNC_SHIFT));
1587
1588                 /* Set registers so that rollover occurs soon to test this. */
1589                 wr32(E1000_SYSTIML, 0x00000000);
1590                 wr32(E1000_SYSTIMH, 0xFF800000);
1591                 wrfl();
1592
1593                 timecounter_init(&adapter->clock,
1594                                  &adapter->cycles,
1595                                  ktime_to_ns(ktime_get_real()));
1596                 /*
1597                  * Synchronize our NIC clock against system wall clock. NIC
1598                  * time stamp reading requires ~3us per sample, and each
1599                  * sample was stable even under load, so 10 samples suffice
1600                  * for each offset comparison.
1601                  */
1602                 memset(&adapter->compare, 0, sizeof(adapter->compare));
1603                 adapter->compare.source = &adapter->clock;
1604                 adapter->compare.target = ktime_get_real;
1605                 adapter->compare.num_samples = 10;
1606                 timecompare_update(&adapter->compare, 0);
1607                 break;
1608         case e1000_82575:
1609                 /* 82575 does not support timesync */
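                     /* fall through */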
1610         default:
1611                 break;
1612         }
1613
1614         dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1615         /* print bus type/speed/width info */
1616         dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1617                  netdev->name,
1618                  ((hw->bus.speed == e1000_bus_speed_2500)
1619                   ? "2.5Gb/s" : "unknown"),
1620                  ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1621                   (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1622                   (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1623                    "unknown"),
1624                  netdev->dev_addr);
1625
1626         igb_read_part_num(hw, &part_num);
1627         dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1628                 (part_num >> 8), (part_num & 0xff));
1629
1630         dev_info(&pdev->dev,
1631                 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1632                 adapter->msix_entries ? "MSI-X" :
1633                 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
1634                 adapter->num_rx_queues, adapter->num_tx_queues);
1635
1636         return 0;
1637
1638 err_register:
1639         igb_release_hw_control(adapter);
1640 err_eeprom:
1641         if (!igb_check_reset_block(hw))
1642                 igb_reset_phy(hw);
1643
1644         if (hw->flash_address)
1645                 iounmap(hw->flash_address);
1646 err_sw_init:
1647         igb_clear_interrupt_scheme(adapter);
1648         iounmap(hw->hw_addr);
1649 err_ioremap:
1650         free_netdev(netdev);
1651 err_alloc_etherdev:
1652         pci_release_selected_regions(pdev, pci_select_bars(pdev,
1653                                      IORESOURCE_MEM));
1654 err_pci_reg:
1655 err_dma:
1656         pci_disable_device(pdev);
1657         return err;
1658 }
1659
1660 /**
1661  * igb_remove - Device Removal Routine
1662  * @pdev: PCI device information struct
1663  *
1664  * igb_remove is called by the PCI subsystem to alert the driver
1665  * that it should release a PCI device.  This could be caused by a
1666  * Hot-Plug event, or because the driver is going to be removed from
1667  * memory.
1668  **/
1669 static void __devexit igb_remove(struct pci_dev *pdev)
1670 {
1671         struct net_device *netdev = pci_get_drvdata(pdev);
1672         struct igb_adapter *adapter = netdev_priv(netdev);
1673         struct e1000_hw *hw = &adapter->hw;
1674
1675         /* flush_scheduled_work() may reschedule our watchdog task, so
1676          * explicitly disable the watchdog task from being rescheduled */
1677         set_bit(__IGB_DOWN, &adapter->state);
1678         del_timer_sync(&adapter->watchdog_timer);
1679         del_timer_sync(&adapter->phy_info_timer);
1680
1681         flush_scheduled_work();
1682
1683 #ifdef CONFIG_IGB_DCA
1684         if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
1685                 dev_info(&pdev->dev, "DCA disabled\n");
1686                 dca_remove_requester(&pdev->dev);
1687                 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
1688                 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
1689         }
1690 #endif
1691
1692         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
1693          * would have already happened in close and is redundant. */
1694         igb_release_hw_control(adapter);
1695
1696         unregister_netdev(netdev);
1697
1698         if (!igb_check_reset_block(&adapter->hw))
1699                 igb_reset_phy(&adapter->hw);
1700
1701         igb_clear_interrupt_scheme(adapter);
1702
1703 #ifdef CONFIG_PCI_IOV
1704         /* reclaim resources allocated to VFs */
1705         if (adapter->vf_data) {
1706                 /* disable iov and allow time for transactions to clear */
1707                 pci_disable_sriov(pdev);
1708                 msleep(500);
1709
1710                 kfree(adapter->vf_data);
1711                 adapter->vf_data = NULL;
1712                 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1713                 msleep(100);
1714                 dev_info(&pdev->dev, "IOV Disabled\n");
1715         }
1716 #endif
1717         iounmap(hw->hw_addr);
1718         if (hw->flash_address)
1719                 iounmap(hw->flash_address);
1720         pci_release_selected_regions(pdev, pci_select_bars(pdev,
1721                                      IORESOURCE_MEM));
1722
1723         free_netdev(netdev);
1724
1725         pci_disable_pcie_error_reporting(pdev);
1726
1727         pci_disable_device(pdev);
1728 }
1729
1730 /**
1731  * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1732  * @adapter: board private structure to initialize
1733  *
1734  * This function initializes the vf specific data storage and then attempts to
1735  * allocate the VFs.  The reason for ordering it this way is that it is much
1736  * more expensive time-wise to disable SR-IOV than it is to allocate and free
1737  * the memory for the VFs.
1738  **/
1739 static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
1740 {
1741 #ifdef CONFIG_PCI_IOV
1742         struct pci_dev *pdev = adapter->pdev;
1743
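             /* the hardware provides eight pools; one is reserved for the PF,
              * leaving at most seven VFs */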
1744         if (adapter->vfs_allocated_count > 7)
1745                 adapter->vfs_allocated_count = 7;
1746
1747         if (adapter->vfs_allocated_count) {
1748                 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1749                                            sizeof(struct vf_data_storage),
1750                                            GFP_KERNEL);
1751                 /* if allocation failed then we do not support SR-IOV */
1752                 if (!adapter->vf_data) {
1753                         adapter->vfs_allocated_count = 0;
1754                         dev_err(&pdev->dev, "Unable to allocate memory for VF "
1755                                 "Data Storage\n");
1756                 }
1757         }
1758
1759         if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1760                 kfree(adapter->vf_data);
1761                 adapter->vf_data = NULL;
1762 #endif /* CONFIG_PCI_IOV */
1763                 adapter->vfs_allocated_count = 0;
1764 #ifdef CONFIG_PCI_IOV
1765         } else {
1766                 unsigned char mac_addr[ETH_ALEN];
1767                 int i;
1768                 dev_info(&pdev->dev, "%d vfs allocated\n",
1769                          adapter->vfs_allocated_count);
1770                 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1771                         random_ether_addr(mac_addr);
1772                         igb_set_vf_mac(adapter, i, mac_addr);
1773                 }
1774         }
1775 #endif /* CONFIG_PCI_IOV */
1776 }
1777
1778 /**
1779  * igb_sw_init - Initialize general software structures (struct igb_adapter)
1780  * @adapter: board private structure to initialize
1781  *
1782  * igb_sw_init initializes the Adapter private data structure.
1783  * Fields are initialized based on PCI device information and
1784  * OS network device settings (MTU size).
1785  **/
1786 static int __devinit igb_sw_init(struct igb_adapter *adapter)
1787 {
1788         struct e1000_hw *hw = &adapter->hw;
1789         struct net_device *netdev = adapter->netdev;
1790         struct pci_dev *pdev = adapter->pdev;
1791
1792         pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1793
1794         adapter->tx_ring_count = IGB_DEFAULT_TXD;
1795         adapter->rx_ring_count = IGB_DEFAULT_RXD;
1796         adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1797         adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1798
1799         adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1800         adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1801
1802 #ifdef CONFIG_PCI_IOV
1803         if (hw->mac.type == e1000_82576)
1804                 adapter->vfs_allocated_count = max_vfs;
1805
1806 #endif /* CONFIG_PCI_IOV */
1807         /* This call may decrease the number of queues */
1808         if (igb_init_interrupt_scheme(adapter)) {
1809                 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1810                 return -ENOMEM;
1811         }
1812
1813         igb_probe_vfs(adapter);
1814
1815         /* Explicitly disable IRQ since the NIC can be in any state. */
1816         igb_irq_disable(adapter);
1817
1818         set_bit(__IGB_DOWN, &adapter->state);
1819         return 0;
1820 }
1821
1822 /**
1823  * igb_open - Called when a network interface is made active
1824  * @netdev: network interface device structure
1825  *
1826  * Returns 0 on success, negative value on failure
1827  *
1828  * The open entry point is called when a network interface is made
1829  * active by the system (IFF_UP).  At this point all resources needed
1830  * for transmit and receive operations are allocated, the interrupt
1831  * handler is registered with the OS, the watchdog timer is started,
1832  * and the stack is notified that the interface is ready.
1833  **/
1834 static int igb_open(struct net_device *netdev)
1835 {
1836         struct igb_adapter *adapter = netdev_priv(netdev);
1837         struct e1000_hw *hw = &adapter->hw;
1838         int err;
1839         int i;
1840
1841         /* disallow open during test */
1842         if (test_bit(__IGB_TESTING, &adapter->state))
1843                 return -EBUSY;
1844
1845         netif_carrier_off(netdev);
1846
1847         /* allocate transmit descriptors */
1848         err = igb_setup_all_tx_resources(adapter);
1849         if (err)
1850                 goto err_setup_tx;
1851
1852         /* allocate receive descriptors */
1853         err = igb_setup_all_rx_resources(adapter);
1854         if (err)
1855                 goto err_setup_rx;
1856
1857         /* e1000_power_up_phy(adapter); */
1858
1859         /* before we allocate an interrupt, we must be ready to handle it.
1860          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1861          * as soon as we call request_irq, so we have to set up our
1862          * clean_rx handler before we do so.  */
1863         igb_configure(adapter);
1864
1865         err = igb_request_irq(adapter);
1866         if (err)
1867                 goto err_req_irq;
1868
1869         /* From here on the code is the same as igb_up() */
1870         clear_bit(__IGB_DOWN, &adapter->state);
1871
1872         for (i = 0; i < adapter->num_q_vectors; i++) {
1873                 struct igb_q_vector *q_vector = adapter->q_vector[i];
1874                 napi_enable(&q_vector->napi);
1875         }
1876
1877         /* Clear any pending interrupts. */
1878         rd32(E1000_ICR);
1879
1880         igb_irq_enable(adapter);
1881
1882         /* notify VFs that reset has been completed */
1883         if (adapter->vfs_allocated_count) {
1884                 u32 reg_data = rd32(E1000_CTRL_EXT);
1885                 reg_data |= E1000_CTRL_EXT_PFRSTD;
1886                 wr32(E1000_CTRL_EXT, reg_data);
1887         }
1888
1889         netif_tx_start_all_queues(netdev);
1890
1891         /* Fire a link status change interrupt to start the watchdog. */
1892         wr32(E1000_ICS, E1000_ICS_LSC);
1893
1894         return 0;
1895
1896 err_req_irq:
1897         igb_release_hw_control(adapter);
1898         /* e1000_power_down_phy(adapter); */
1899         igb_free_all_rx_resources(adapter);
1900 err_setup_rx:
1901         igb_free_all_tx_resources(adapter);
1902 err_setup_tx:
1903         igb_reset(adapter);
1904
1905         return err;
1906 }
1907
1908 /**
1909  * igb_close - Disables a network interface
1910  * @netdev: network interface device structure
1911  *
1912  * Returns 0, this is not allowed to fail
1913  *
1914  * The close entry point is called when an interface is de-activated
1915  * by the OS.  The hardware is still under the driver's control, but
1916  * needs to be disabled.  A global MAC reset is issued to stop the
1917  * hardware, and all transmit and receive resources are freed.
1918  **/
1919 static int igb_close(struct net_device *netdev)
1920 {
1921         struct igb_adapter *adapter = netdev_priv(netdev);
1922
1923         WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1924         igb_down(adapter);
1925
1926         igb_free_irq(adapter);
1927
1928         igb_free_all_tx_resources(adapter);
1929         igb_free_all_rx_resources(adapter);
1930
1931         return 0;
1932 }
1933
1934 /**
1935  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1936  * @tx_ring: tx descriptor ring (for a specific queue) to setup
1937  *
1938  * Return 0 on success, negative on failure
1939  **/
1940 int igb_setup_tx_resources(struct igb_ring *tx_ring)
1941 {
1942         struct pci_dev *pdev = tx_ring->pdev;
1943         int size;
1944
1945         size = sizeof(struct igb_buffer) * tx_ring->count;
1946         tx_ring->buffer_info = vmalloc(size);
1947         if (!tx_ring->buffer_info)
1948                 goto err;
1949         memset(tx_ring->buffer_info, 0, size);
1950
1951         /* round up to nearest 4K */
1952         tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1953         tx_ring->size = ALIGN(tx_ring->size, 4096);
1954
1955         tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1956                                              &tx_ring->dma);
1957
1958         if (!tx_ring->desc)
1959                 goto err;
1960
1961         tx_ring->next_to_use = 0;
1962         tx_ring->next_to_clean = 0;
1963         return 0;
1964
1965 err:
1966         vfree(tx_ring->buffer_info);
1967         dev_err(&pdev->dev,
1968                 "Unable to allocate memory for the transmit descriptor ring\n");
1969         return -ENOMEM;
1970 }
1971
1972 /**
1973  * igb_setup_all_tx_resources - wrapper to allocate Tx resources
1974  *                                (Descriptors) for all queues
1975  * @adapter: board private structure
1976  *
1977  * Return 0 on success, negative on failure
1978  **/
1979 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1980 {
1981         int i, err = 0;
1982         int r_idx;
1983
1984         for (i = 0; i < adapter->num_tx_queues; i++) {
1985                 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
1986                 if (err) {
1987                         dev_err(&adapter->pdev->dev,
1988                                 "Allocation for Tx Queue %u failed\n", i);
1989                         for (i--; i >= 0; i--)
1990                                 igb_free_tx_resources(&adapter->tx_ring[i]);
1991                         break;
1992                 }
1993         }
1994
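             /* map every possible Tx queue index onto one of the rings we
              * actually allocated, wrapping round-robin when there are fewer
              * rings than IGB_MAX_TX_QUEUES */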
1995         for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1996                 r_idx = i % adapter->num_tx_queues;
1997                 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1998         }
1999         return err;
2000 }
2001
2002 /**
2003  * igb_setup_tctl - configure the transmit control registers
2004  * @adapter: Board private structure
2005  **/
2006 void igb_setup_tctl(struct igb_adapter *adapter)
2007 {
2008         struct e1000_hw *hw = &adapter->hw;
2009         u32 tctl;
2010
2011         /* disable queue 0 which is enabled by default on 82575 and 82576 */
2012         wr32(E1000_TXDCTL(0), 0);
2013
2014         /* Program the Transmit Control Register */
2015         tctl = rd32(E1000_TCTL);
2016         tctl &= ~E1000_TCTL_CT;
2017         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2018                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2019
2020         igb_config_collision_dist(hw);
2021
2022         /* Enable transmits */
2023         tctl |= E1000_TCTL_EN;
2024
2025         wr32(E1000_TCTL, tctl);
2026 }
2027
2028 /**
2029  * igb_configure_tx_ring - Configure transmit ring after Reset
2030  * @adapter: board private structure
2031  * @ring: tx ring to configure
2032  *
2033  * Configure a transmit ring after a reset.
2034  **/
2035 void igb_configure_tx_ring(struct igb_adapter *adapter,
2036                            struct igb_ring *ring)
2037 {
2038         struct e1000_hw *hw = &adapter->hw;
2039         u32 txdctl;
2040         u64 tdba = ring->dma;
2041         int reg_idx = ring->reg_idx;
2042
2043         /* disable the queue */
2044         txdctl = rd32(E1000_TXDCTL(reg_idx));
2045         wr32(E1000_TXDCTL(reg_idx),
2046                         txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2047         wrfl();
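             /* give the hardware time to quiesce the queue before
              * reprogramming it */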
2048         mdelay(10);
2049
2050         wr32(E1000_TDLEN(reg_idx),
2051                         ring->count * sizeof(union e1000_adv_tx_desc));
2052         wr32(E1000_TDBAL(reg_idx),
2053                         tdba & 0x00000000ffffffffULL);
2054         wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2055
2056         ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2057         ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2058         writel(0, ring->head);
2059         writel(0, ring->tail);
2060
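             /* program the prefetch, host and write-back thresholds */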
2061         txdctl |= IGB_TX_PTHRESH;
2062         txdctl |= IGB_TX_HTHRESH << 8;
2063         txdctl |= IGB_TX_WTHRESH << 16;
2064
2065         txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2066         wr32(E1000_TXDCTL(reg_idx), txdctl);
2067 }
2068
2069 /**
2070  * igb_configure_tx - Configure transmit Unit after Reset
2071  * @adapter: board private structure
2072  *
2073  * Configure the Tx unit of the MAC after a reset.
2074  **/
2075 static void igb_configure_tx(struct igb_adapter *adapter)
2076 {
2077         int i;
2078
2079         for (i = 0; i < adapter->num_tx_queues; i++)
2080                 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2081 }
2082
2083 /**
2084  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2085  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2086  *
2087  * Returns 0 on success, negative on failure
2088  **/
2089 int igb_setup_rx_resources(struct igb_ring *rx_ring)
2090 {
2091         struct pci_dev *pdev = rx_ring->pdev;
2092         int size, desc_len;
2093
2094         size = sizeof(struct igb_buffer) * rx_ring->count;
2095         rx_ring->buffer_info = vmalloc(size);
2096         if (!rx_ring->buffer_info)
2097                 goto err;
2098         memset(rx_ring->buffer_info, 0, size);
2099
2100         desc_len = sizeof(union e1000_adv_rx_desc);
2101
2102         /* Round up to nearest 4K */
2103         rx_ring->size = rx_ring->count * desc_len;
2104         rx_ring->size = ALIGN(rx_ring->size, 4096);
2105
2106         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2107                                              &rx_ring->dma);
2108
2109         if (!rx_ring->desc)
2110                 goto err;
2111
2112         rx_ring->next_to_clean = 0;
2113         rx_ring->next_to_use = 0;
2114
2115         return 0;
2116
2117 err:
2118         vfree(rx_ring->buffer_info);
2119         dev_err(&pdev->dev, "Unable to allocate memory for "
2120                 "the receive descriptor ring\n");
2121         return -ENOMEM;
2122 }
2123
2124 /**
2125  * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2126  *                                (Descriptors) for all queues
2127  * @adapter: board private structure
2128  *
2129  * Return 0 on success, negative on failure
2130  **/
2131 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2132 {
2133         int i, err = 0;
2134
2135         for (i = 0; i < adapter->num_rx_queues; i++) {
2136                 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2137                 if (err) {
2138                         dev_err(&adapter->pdev->dev,
2139                                 "Allocation for Rx Queue %u failed\n", i);
2140                         for (i--; i >= 0; i--)
2141                                 igb_free_rx_resources(&adapter->rx_ring[i]);
2142                         break;
2143                 }
2144         }
2145
2146         return err;
2147 }
2148
2149 /**
2150  * igb_setup_mrqc - configure the multiple receive queue control registers
2151  * @adapter: Board private structure
2152  **/
2153 static void igb_setup_mrqc(struct igb_adapter *adapter)
2154 {
2155         struct e1000_hw *hw = &adapter->hw;
2156         u32 mrqc, rxcsum;
2157         u32 j, num_rx_queues, shift = 0, shift2 = 0;
2158         union e1000_reta {
2159                 u32 dword;
2160                 u8  bytes[4];
2161         } reta;
2162         static const u8 rsshash[40] = {
2163                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2164                 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2165                 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2166                 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2167
2168         /* Fill out hash function seeds */
2169         for (j = 0; j < 10; j++) {
2170                 u32 rsskey = rsshash[(j * 4)];
2171                 rsskey |= rsshash[(j * 4) + 1] << 8;
2172                 rsskey |= rsshash[(j * 4) + 2] << 16;
2173                 rsskey |= rsshash[(j * 4) + 3] << 24;
2174                 array_wr32(E1000_RSSRK(0), j, rsskey);
2175         }
2176
2177         num_rx_queues = adapter->num_rx_queues;
2178
2179         if (adapter->vfs_allocated_count) {
2180                 /* the 82575 and 82576 support 2 RSS queues for VMDq */
2181                 switch (hw->mac.type) {
2182                 case e1000_82576:
2183                         shift = 3;
2184                         num_rx_queues = 2;
2185                         break;
2186                 case e1000_82575:
2187                         shift = 2;
2188                         shift2 = 6;
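                             /* fall through */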
2189                 default:
2190                         break;
2191                 }
2192         } else {
2193                 if (hw->mac.type == e1000_82575)
2194                         shift = 6;
2195         }
2196
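             /* Populate the 128-entry redirection table: each byte maps one
              * hash bucket to an Rx queue, written one dword (four entries)
              * at a time. */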
2197         for (j = 0; j < (32 * 4); j++) {
2198                 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2199                 if (shift2)
2200                         reta.bytes[j & 3] |= num_rx_queues << shift2;
2201                 if ((j & 3) == 3)
2202                         wr32(E1000_RETA(j >> 2), reta.dword);
2203         }
2204
2205         /*
2206          * Disable raw packet checksumming so that RSS hash is placed in
2207          * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
2208          * offloads as they are enabled by default
2209          */
2210         rxcsum = rd32(E1000_RXCSUM);
2211         rxcsum |= E1000_RXCSUM_PCSD;
2212
2213         if (adapter->hw.mac.type >= e1000_82576)
2214                 /* Enable Receive Checksum Offload for SCTP */
2215                 rxcsum |= E1000_RXCSUM_CRCOFL;
2216
2217         /* Don't need to set TUOFL or IPOFL, they default to 1 */
2218         wr32(E1000_RXCSUM, rxcsum);
2219
2220         /* If VMDq is enabled then we set the appropriate mode for that, else
2221          * we default to RSS so that an RSS hash is calculated per packet even
2222          * if we are only using one queue */
2223         if (adapter->vfs_allocated_count) {
2224                 if (hw->mac.type > e1000_82575) {
2225                         /* Set the default pool for the PF's first queue */
2226                         u32 vtctl = rd32(E1000_VT_CTL);
2227                         vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2228                                    E1000_VT_CTL_DISABLE_DEF_POOL);
2229                         vtctl |= adapter->vfs_allocated_count <<
2230                                 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2231                         wr32(E1000_VT_CTL, vtctl);
2232                 }
2233                 if (adapter->num_rx_queues > 1)
2234                         mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2235                 else
2236                         mrqc = E1000_MRQC_ENABLE_VMDQ;
2237         } else {
2238                 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2239         }
2240         igb_vmm_control(adapter);
2241
2242         mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2243                  E1000_MRQC_RSS_FIELD_IPV4_TCP);
2244         mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2245                  E1000_MRQC_RSS_FIELD_IPV6_TCP);
2246         mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2247                  E1000_MRQC_RSS_FIELD_IPV6_UDP);
2248         mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2249                  E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2250
2251         wr32(E1000_MRQC, mrqc);
2252 }
2253
2254 /**
2255  * igb_setup_rctl - configure the receive control registers
2256  * @adapter: Board private structure
2257  **/
2258 void igb_setup_rctl(struct igb_adapter *adapter)
2259 {
2260         struct e1000_hw *hw = &adapter->hw;
2261         u32 rctl;
2262
2263         rctl = rd32(E1000_RCTL);
2264
2265         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2266         rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2267
2268         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2269                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2270
2271         /*
2272          * enable stripping of CRC. It's unlikely this will break BMC
2273          * redirection as it did with e1000. Newer features require
2274          * that the HW strips the CRC.
2275          */
2276         rctl |= E1000_RCTL_SECRC;
2277
2278         /*
2279          * disable store bad packets and clear size bits.
2280          */
2281         rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2282
2283         /* enable LPE to prevent packets larger than max_frame_size */
2284         rctl |= E1000_RCTL_LPE;
2285
2286         /* disable queue 0 to prevent tail write w/o re-config */
2287         wr32(E1000_RXDCTL(0), 0);
2288
2289         /* Attention!!!  For SR-IOV PF driver operations you must enable
2290          * queue drop for all VF and PF queues to prevent head of line blocking
2291          * if an un-trusted VF does not provide descriptors to hardware.
2292          */
2293         if (adapter->vfs_allocated_count) {
2294                 /* set all queue drop enable bits */
2295                 wr32(E1000_QDE, ALL_QUEUES);
2296         }
2297
2298         wr32(E1000_RCTL, rctl);
2299 }
2300
2301 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2302                                    int vfn)
2303 {
2304         struct e1000_hw *hw = &adapter->hw;
2305         u32 vmolr;
2306
2307         /* if this is a VF rather than the PF, check whether it has VLANs
2308          * enabled and, if so, increase the size to allow for a vlan tag */
2309         if (vfn < adapter->vfs_allocated_count &&
2310             adapter->vf_data[vfn].vlans_enabled)
2311                 size += VLAN_TAG_SIZE;
2312
2313         vmolr = rd32(E1000_VMOLR(vfn));
2314         vmolr &= ~E1000_VMOLR_RLPML_MASK;
2315         vmolr |= size | E1000_VMOLR_LPE;
2316         wr32(E1000_VMOLR(vfn), vmolr);
2317
2318         return 0;
2319 }
2320
2321 /**
2322  * igb_rlpml_set - set maximum receive packet size
2323  * @adapter: board private structure
2324  *
2325  * Configure maximum receivable packet size.
2326  **/
2327 static void igb_rlpml_set(struct igb_adapter *adapter)
2328 {
2329         u32 max_frame_size = adapter->max_frame_size;
2330         struct e1000_hw *hw = &adapter->hw;
2331         u16 pf_id = adapter->vfs_allocated_count;
2332
2333         if (adapter->vlgrp)
2334                 max_frame_size += VLAN_TAG_SIZE;
2335
2336         /* if vfs are enabled we set RLPML to the largest possible request
2337          * size and set the VMOLR RLPML to the size we need */
2338         if (pf_id) {
2339                 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2340                 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2341         }
2342
2343         wr32(E1000_RLPML, max_frame_size);
2344 }
2345
2346 static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2347 {
2348         struct e1000_hw *hw = &adapter->hw;
2349         u32 vmolr;
2350
2351         /*
2352          * This register exists only on the 82576 and newer, so on older
2353          * parts exit and do nothing
2354          */
2355         if (hw->mac.type < e1000_82576)
2356                 return;
2357
2358         vmolr = rd32(E1000_VMOLR(vfn));
2359         vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
2360                  E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
2361
2362         /* clear the bits we conditionally set below */
2363         vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2364
2365         if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
2366                 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2367         /*
2368          * for VMDq only allow the VFs and pool 0 to accept broadcast and
2369          * multicast packets
2370          */
2371         if (vfn <= adapter->vfs_allocated_count)
2372                 vmolr |= E1000_VMOLR_BAM;          /* Accept broadcast */
2373
2374         wr32(E1000_VMOLR(vfn), vmolr);
2375 }
2376
2377 /**
2378  * igb_configure_rx_ring - Configure a receive ring after Reset
2379  * @adapter: board private structure
2380  * @ring: receive ring to be configured
2381  *
2382  * Configure the Rx unit of the MAC after a reset.
2383  **/
2384 void igb_configure_rx_ring(struct igb_adapter *adapter,
2385                            struct igb_ring *ring)
2386 {
2387         struct e1000_hw *hw = &adapter->hw;
2388         u64 rdba = ring->dma;
2389         int reg_idx = ring->reg_idx;
2390         u32 srrctl, rxdctl;
2391
2392         /* disable the queue */
2393         rxdctl = rd32(E1000_RXDCTL(reg_idx));
2394         wr32(E1000_RXDCTL(reg_idx),
2395                         rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2396
2397         /* Set DMA base address registers */
2398         wr32(E1000_RDBAL(reg_idx),
2399              rdba & 0x00000000ffffffffULL);
2400         wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2401         wr32(E1000_RDLEN(reg_idx),
2402                        ring->count * sizeof(union e1000_adv_rx_desc));
2403
2404         /* initialize head and tail */
2405         ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2406         ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2407         writel(0, ring->head);
2408         writel(0, ring->tail);
2409
2410         /* set descriptor configuration */
2411         if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2412                 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2413                          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
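                     /* use half a page for the packet buffer, capped at the
                      * 16KB hardware maximum */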
2414 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2415                 srrctl |= IGB_RXBUFFER_16384 >>
2416                           E1000_SRRCTL_BSIZEPKT_SHIFT;
2417 #else
2418                 srrctl |= (PAGE_SIZE / 2) >>
2419                           E1000_SRRCTL_BSIZEPKT_SHIFT;
2420 #endif
2421                 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2422         } else {
2423                 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2424                          E1000_SRRCTL_BSIZEPKT_SHIFT;
2425                 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2426         }
2427
2428         wr32(E1000_SRRCTL(reg_idx), srrctl);
2429
2430         /* set filtering for VMDQ pools */
2431         igb_set_vmolr(adapter, reg_idx & 0x7);
2432
2433         /* enable receive descriptor fetching */
2434         rxdctl = rd32(E1000_RXDCTL(reg_idx));
2435         rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
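             /* the mask clears the threshold fields while preserving the
              * queue enable bit set above */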
2436         rxdctl &= 0xFFF00000;
2437         rxdctl |= IGB_RX_PTHRESH;
2438         rxdctl |= IGB_RX_HTHRESH << 8;
2439         rxdctl |= IGB_RX_WTHRESH << 16;
2440         wr32(E1000_RXDCTL(reg_idx), rxdctl);
2441 }
2442
2443 /**
2444  * igb_configure_rx - Configure receive Unit after Reset
2445  * @adapter: board private structure
2446  *
2447  * Configure the Rx unit of the MAC after a reset.
2448  **/
2449 static void igb_configure_rx(struct igb_adapter *adapter)
2450 {
2451         int i;
2452
2453         /* set UTA to appropriate mode */
2454         igb_set_uta(adapter);
2455
2456         /* set the correct pool for the PF default MAC address in entry 0 */
2457         igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2458                          adapter->vfs_allocated_count);
2459
2460         /* Setup the HW Rx Head and Tail Descriptor Pointers and
2461          * the Base and Length of the Rx Descriptor Ring */
2462         for (i = 0; i < adapter->num_rx_queues; i++)
2463                 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2464 }
2465
2466 /**
2467  * igb_free_tx_resources - Free Tx Resources per Queue
2468  * @tx_ring: Tx descriptor ring for a specific queue
2469  *
2470  * Free all transmit software resources
2471  **/
2472 void igb_free_tx_resources(struct igb_ring *tx_ring)
2473 {
2474         igb_clean_tx_ring(tx_ring);
2475
2476         vfree(tx_ring->buffer_info);
2477         tx_ring->buffer_info = NULL;
2478
2479         pci_free_consistent(tx_ring->pdev, tx_ring->size,
2480                             tx_ring->desc, tx_ring->dma);
2481
2482         tx_ring->desc = NULL;
2483 }
2484
2485 /**
2486  * igb_free_all_tx_resources - Free Tx Resources for All Queues
2487  * @adapter: board private structure
2488  *
2489  * Free all transmit software resources
2490  **/
2491 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2492 {
2493         int i;
2494
2495         for (i = 0; i < adapter->num_tx_queues; i++)
2496                 igb_free_tx_resources(&adapter->tx_ring[i]);
2497 }
2498
2499 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2500                                     struct igb_buffer *buffer_info)
2501 {
2502         buffer_info->dma = 0;
2503         if (buffer_info->skb) {
2504                 skb_dma_unmap(&tx_ring->pdev->dev,
2505                               buffer_info->skb,
2506                               DMA_TO_DEVICE);
2507                 dev_kfree_skb_any(buffer_info->skb);
2508                 buffer_info->skb = NULL;
2509         }
2510         buffer_info->time_stamp = 0;
2511         /* buffer_info must be completely set up in the transmit path */
2512 }
2513
2514 /**
2515  * igb_clean_tx_ring - Free Tx Buffers
2516  * @tx_ring: ring to be cleaned
2517  **/
2518 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2519 {
2520         struct igb_buffer *buffer_info;
2521         unsigned long size;
2522         unsigned int i;
2523
2524         if (!tx_ring->buffer_info)
2525                 return;
2526         /* Free all the Tx ring sk_buffs */
2527
2528         for (i = 0; i < tx_ring->count; i++) {
2529                 buffer_info = &tx_ring->buffer_info[i];
2530                 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2531         }
2532
2533         size = sizeof(struct igb_buffer) * tx_ring->count;
2534         memset(tx_ring->buffer_info, 0, size);
2535
2536         /* Zero out the descriptor ring */
2537
2538         memset(tx_ring->desc, 0, tx_ring->size);
2539
2540         tx_ring->next_to_use = 0;
2541         tx_ring->next_to_clean = 0;
2542
2543         writel(0, tx_ring->head);
2544         writel(0, tx_ring->tail);
2545 }
2546
2547 /**
2548  * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2549  * @adapter: board private structure
2550  **/
2551 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2552 {
2553         int i;
2554
2555         for (i = 0; i < adapter->num_tx_queues; i++)
2556                 igb_clean_tx_ring(&adapter->tx_ring[i]);
2557 }
2558
2559 /**
2560  * igb_free_rx_resources - Free Rx Resources
2561  * @rx_ring: ring to clean the resources from
2562  *
2563  * Free all receive software resources
2564  **/
2565 void igb_free_rx_resources(struct igb_ring *rx_ring)
2566 {
2567         igb_clean_rx_ring(rx_ring);
2568
2569         vfree(rx_ring->buffer_info);
2570         rx_ring->buffer_info = NULL;
2571
2572         pci_free_consistent(rx_ring->pdev, rx_ring->size,
2573                             rx_ring->desc, rx_ring->dma);
2574
2575         rx_ring->desc = NULL;
2576 }
2577
2578 /**
2579  * igb_free_all_rx_resources - Free Rx Resources for All Queues
2580  * @adapter: board private structure
2581  *
2582  * Free all receive software resources
2583  **/
2584 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2585 {
2586         int i;
2587
2588         for (i = 0; i < adapter->num_rx_queues; i++)
2589                 igb_free_rx_resources(&adapter->rx_ring[i]);
2590 }
2591
2592 /**
2593  * igb_clean_rx_ring - Free Rx Buffers per Queue
2594  * @rx_ring: ring to free buffers from
2595  **/
2596 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2597 {
2598         struct igb_buffer *buffer_info;
2599         unsigned long size;
2600         unsigned int i;
2601
2602         if (!rx_ring->buffer_info)
2603                 return;
2604         /* Free all the Rx ring sk_buffs */
2605         for (i = 0; i < rx_ring->count; i++) {
2606                 buffer_info = &rx_ring->buffer_info[i];
2607                 if (buffer_info->dma) {
2608                         pci_unmap_single(rx_ring->pdev,
2609                                          buffer_info->dma,
2610                                          rx_ring->rx_buffer_len,
2611                                          PCI_DMA_FROMDEVICE);
2612                         buffer_info->dma = 0;
2613                 }
2614
2615                 if (buffer_info->skb) {
2616                         dev_kfree_skb(buffer_info->skb);
2617                         buffer_info->skb = NULL;
2618                 }
2619                 if (buffer_info->page_dma) {
2620                         pci_unmap_page(rx_ring->pdev,
2621                                        buffer_info->page_dma,
2622                                        PAGE_SIZE / 2,
2623                                        PCI_DMA_FROMDEVICE);
2624                         buffer_info->page_dma = 0;
2625                 }
2626                 if (buffer_info->page) {
2627                         put_page(buffer_info->page);
2628                         buffer_info->page = NULL;
2629                         buffer_info->page_offset = 0;
2630                 }
2631         }
2632
2633         size = sizeof(struct igb_buffer) * rx_ring->count;
2634         memset(rx_ring->buffer_info, 0, size);
2635
2636         /* Zero out the descriptor ring */
2637         memset(rx_ring->desc, 0, rx_ring->size);
2638
2639         rx_ring->next_to_clean = 0;
2640         rx_ring->next_to_use = 0;
2641
2642         writel(0, rx_ring->head);
2643         writel(0, rx_ring->tail);
2644 }
2645
2646 /**
2647  * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2648  * @adapter: board private structure
2649  **/
2650 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2651 {
2652         int i;
2653
2654         for (i = 0; i < adapter->num_rx_queues; i++)
2655                 igb_clean_rx_ring(&adapter->rx_ring[i]);
2656 }
2657
2658 /**
2659  * igb_set_mac - Change the Ethernet Address of the NIC
2660  * @netdev: network interface device structure
2661  * @p: pointer to an address structure
2662  *
2663  * Returns 0 on success, negative on failure
2664  **/
2665 static int igb_set_mac(struct net_device *netdev, void *p)
2666 {
2667         struct igb_adapter *adapter = netdev_priv(netdev);
2668         struct e1000_hw *hw = &adapter->hw;
2669         struct sockaddr *addr = p;
2670
2671         if (!is_valid_ether_addr(addr->sa_data))
2672                 return -EADDRNOTAVAIL;
2673
2674         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2675         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2676
2677         /* set the correct pool for the new PF MAC address in entry 0 */
2678         igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2679                          adapter->vfs_allocated_count);
2680
2681         return 0;
2682 }
2683
2684 /**
2685  * igb_write_mc_addr_list - write multicast addresses to MTA
2686  * @netdev: network interface device structure
2687  *
2688  * Writes multicast address list to the MTA hash table.
2689  * Returns: -ENOMEM on failure
2690  *                0 on no addresses written
2691  *                X on writing X addresses to MTA
2692  **/
2693 static int igb_write_mc_addr_list(struct net_device *netdev)
2694 {
2695         struct igb_adapter *adapter = netdev_priv(netdev);
2696         struct e1000_hw *hw = &adapter->hw;
2697         struct dev_mc_list *mc_ptr = netdev->mc_list;
2698         u8  *mta_list;
2699         u32 vmolr = 0;
2700         int i;
2701
2702         if (!netdev->mc_count) {
2703                 /* nothing to program, so clear mc list */
2704                 igb_update_mc_addr_list(hw, NULL, 0);
2705                 igb_restore_vf_multicasts(adapter);
2706                 return 0;
2707         }
2708
2709         mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2710         if (!mta_list)
2711                 return -ENOMEM;
2712
2713         /* set vmolr receive overflow multicast bit */
2714         vmolr |= E1000_VMOLR_ROMPE;
2715
2716         /* The shared function expects a packed array of only addresses. */
2717         mc_ptr = netdev->mc_list;
2718
2719         for (i = 0; i < netdev->mc_count; i++) {
2720                 if (!mc_ptr)
2721                         break;
2722                 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2723                 mc_ptr = mc_ptr->next;
2724         }
2725         igb_update_mc_addr_list(hw, mta_list, i);
2726         kfree(mta_list);
2727
2728         return netdev->mc_count;
2729 }
2730
2731 /**
2732  * igb_write_uc_addr_list - write unicast addresses to RAR table
2733  * @netdev: network interface device structure
2734  *
2735  * Writes unicast address list to the RAR table.
2736  * Returns: -ENOMEM on failure/insufficient address space
2737  *                0 on no addresses written
2738  *                X on writing X addresses to the RAR table
2739  **/
2740 static int igb_write_uc_addr_list(struct net_device *netdev)
2741 {
2742         struct igb_adapter *adapter = netdev_priv(netdev);
2743         struct e1000_hw *hw = &adapter->hw;
2744         unsigned int vfn = adapter->vfs_allocated_count;
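             /* entry 0 holds the PF default MAC and one entry is reserved
              * per VF, so only the remaining RARs are free for secondary
              * unicast addresses */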
2745         unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2746         int count = 0;
2747
2748         /* return ENOMEM indicating insufficient memory for addresses */
2749         if (netdev->uc.count > rar_entries)
2750                 return -ENOMEM;
2751
2752         if (netdev->uc.count && rar_entries) {
2753                 struct netdev_hw_addr *ha;
2754                 list_for_each_entry(ha, &netdev->uc.list, list) {
2755                         if (!rar_entries)
2756                                 break;
2757                         igb_rar_set_qsel(adapter, ha->addr,
2758                                          rar_entries--,
2759                                          vfn);
2760                         count++;
2761                 }
2762         }
2763         /* write the addresses in reverse order to avoid write combining */
2764         for (; rar_entries > 0 ; rar_entries--) {
2765                 wr32(E1000_RAH(rar_entries), 0);
2766                 wr32(E1000_RAL(rar_entries), 0);
2767         }
2768         wrfl();
2769
2770         return count;
2771 }
2772
2773 /**
2774  * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2775  * @netdev: network interface device structure
2776  *
2777  * The set_rx_mode entry point is called whenever the unicast or multicast
2778  * address lists or the network interface flags are updated.  This routine is
2779  * responsible for configuring the hardware for proper unicast, multicast,
2780  * promiscuous mode, and all-multi behavior.
2781  **/
2782 static void igb_set_rx_mode(struct net_device *netdev)
2783 {
2784         struct igb_adapter *adapter = netdev_priv(netdev);
2785         struct e1000_hw *hw = &adapter->hw;
2786         unsigned int vfn = adapter->vfs_allocated_count;
2787         u32 rctl, vmolr = 0;
2788         int count;
2789
2790         /* Check for Promiscuous and All Multicast modes */
2791         rctl = rd32(E1000_RCTL);
2792
2793         /* clear the affected bits */
2794         rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2795
2796         if (netdev->flags & IFF_PROMISC) {
2797                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2798                 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2799         } else {
2800                 if (netdev->flags & IFF_ALLMULTI) {
2801                         rctl |= E1000_RCTL_MPE;
2802                         vmolr |= E1000_VMOLR_MPME;
2803                 } else {
2804                         /*
2805                          * Write addresses to the MTA; if the attempt fails
2806                          * then we should just turn on promiscuous mode so
2807                          * that we can at least receive multicast traffic
2808                          */
2809                         count = igb_write_mc_addr_list(netdev);
2810                         if (count < 0) {
2811                                 rctl |= E1000_RCTL_MPE;
2812                                 vmolr |= E1000_VMOLR_MPME;
2813                         } else if (count) {
2814                                 vmolr |= E1000_VMOLR_ROMPE;
2815                         }
2816                 }
2817                 /*
2818                  * Write addresses to the available RAR registers; if there is
2819                  * not sufficient space to store all the addresses then enable
2820                  * unicast promiscuous mode
2821                  */
2822                 count = igb_write_uc_addr_list(netdev);
2823                 if (count < 0) {
2824                         rctl |= E1000_RCTL_UPE;
2825                         vmolr |= E1000_VMOLR_ROPE;
2826                 }
2827                 rctl |= E1000_RCTL_VFE;
2828         }
2829         wr32(E1000_RCTL, rctl);
2830
2831         /*
2832          * In order to support SR-IOV and eventually VMDq it is necessary to set
2833          * the VMOLR to enable the appropriate modes.  Without this workaround
2834          * we will have issues with VLAN tag stripping not being done for frames
2835          * that are only arriving because we are the default pool
2836          */
2837         if (hw->mac.type < e1000_82576)
2838                 return;
2839
2840         vmolr |= rd32(E1000_VMOLR(vfn)) &
2841                  ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2842         wr32(E1000_VMOLR(vfn), vmolr);
2843         igb_restore_vf_multicasts(adapter);
2844 }
2845
2846 /* Need to wait a few seconds after link up to get diagnostic information from
2847  * the phy */
2848 static void igb_update_phy_info(unsigned long data)
2849 {
2850         struct igb_adapter *adapter = (struct igb_adapter *) data;
2851         igb_get_phy_info(&adapter->hw);
2852 }
2853
2854 /**
2855  * igb_has_link - check shared code for link and determine up/down
2856  * @adapter: pointer to driver private info
2857  **/
2858 static bool igb_has_link(struct igb_adapter *adapter)
2859 {
2860         struct e1000_hw *hw = &adapter->hw;
2861         bool link_active = false;
2862         s32 ret_val = 0;
2863
2864         /* get_link_status is set on LSC (link status) interrupt or
2865          * rx sequence error interrupt.  get_link_status will stay
2866          * true until e1000_check_for_link establishes link
2867          * for copper adapters ONLY
2868          */
2869         switch (hw->phy.media_type) {
2870         case e1000_media_type_copper:
2871                 if (hw->mac.get_link_status) {
2872                         ret_val = hw->mac.ops.check_for_link(hw);
2873                         link_active = !hw->mac.get_link_status;
2874                 } else {
2875                         link_active = true;
2876                 }
2877                 break;
2878         case e1000_media_type_internal_serdes:
2879                 ret_val = hw->mac.ops.check_for_link(hw);
2880                 link_active = hw->mac.serdes_has_link;
2881                 break;
2882         default:
2883         case e1000_media_type_unknown:
2884                 break;
2885         }
2886
2887         return link_active;
2888 }
2889
2890 /**
2891  * igb_watchdog - Timer Call-back
2892  * @data: pointer to adapter cast into an unsigned long
2893  **/
2894 static void igb_watchdog(unsigned long data)
2895 {
2896         struct igb_adapter *adapter = (struct igb_adapter *)data;
2897         /* Do the rest outside of interrupt context */
2898         schedule_work(&adapter->watchdog_task);
2899 }
2900
2901 static void igb_watchdog_task(struct work_struct *work)
2902 {
2903         struct igb_adapter *adapter = container_of(work,
2904                                         struct igb_adapter, watchdog_task);
2905         struct e1000_hw *hw = &adapter->hw;
2906         struct net_device *netdev = adapter->netdev;
2907         struct igb_ring *tx_ring = adapter->tx_ring;
2908         u32 link;
2909         int i;
2910
2911         link = igb_has_link(adapter);
2912         if (netif_carrier_ok(netdev) && link)
2913                 goto link_up;
2914
2915         if (link) {
2916                 if (!netif_carrier_ok(netdev)) {
2917                         u32 ctrl;
2918                         hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2919                                                    &adapter->link_speed,
2920                                                    &adapter->link_duplex);
2921
2922                         ctrl = rd32(E1000_CTRL);
2923                         /* Link status message must follow this format */
2924                         printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2925                                  "Flow Control: %s\n",
2926                                  netdev->name,
2927                                  adapter->link_speed,
2928                                  adapter->link_duplex == FULL_DUPLEX ?
2929                                  "Full Duplex" : "Half Duplex",
2930                                  ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2931                                  E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2932                                  E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2933                                  E1000_CTRL_TFCE) ? "TX" : "None")));
2934
2935                         /* tweak tx_queue_len according to speed/duplex and
2936                          * adjust the timeout factor */
2937                         netdev->tx_queue_len = adapter->tx_queue_len;
2938                         adapter->tx_timeout_factor = 1;
2939                         switch (adapter->link_speed) {
2940                         case SPEED_10:
2941                                 netdev->tx_queue_len = 10;
2942                                 adapter->tx_timeout_factor = 14;
2943                                 break;
2944                         case SPEED_100:
2945                                 netdev->tx_queue_len = 100;
2946                                 /* maybe add some timeout factor ? */
2947                                 break;
2948                         }
2949
2950                         netif_carrier_on(netdev);
2951
2952                         igb_ping_all_vfs(adapter);
2953
2954                         /* link state has changed, schedule phy info update */
2955                         if (!test_bit(__IGB_DOWN, &adapter->state))
2956                                 mod_timer(&adapter->phy_info_timer,
2957                                           round_jiffies(jiffies + 2 * HZ));
2958                 }
2959         } else {
2960                 if (netif_carrier_ok(netdev)) {
2961                         adapter->link_speed = 0;
2962                         adapter->link_duplex = 0;
2963                         /* Link status message must follow this format */
2964                         printk(KERN_INFO "igb: %s NIC Link is Down\n",
2965                                netdev->name);
2966                         netif_carrier_off(netdev);
2967
2968                         igb_ping_all_vfs(adapter);
2969
2970                         /* link state has changed, schedule phy info update */
2971                         if (!test_bit(__IGB_DOWN, &adapter->state))
2972                                 mod_timer(&adapter->phy_info_timer,
2973                                           round_jiffies(jiffies + 2 * HZ));
2974                 }
2975         }
2976
2977 link_up:
2978         igb_update_stats(adapter);
2979
2980         hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2981         adapter->tpt_old = adapter->stats.tpt;
2982         hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2983         adapter->colc_old = adapter->stats.colc;
2984
2985         adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2986         adapter->gorc_old = adapter->stats.gorc;
2987         adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2988         adapter->gotc_old = adapter->stats.gotc;
2989
2990         igb_update_adaptive(&adapter->hw);
2991
2992         if (!netif_carrier_ok(netdev)) {
2993                 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2994                         /* We've lost link, so the controller stops DMA,
2995                          * but we've got queued Tx work that's never going
2996                          * to get done, so reset controller to flush Tx.
2997                          * (Do the reset outside of interrupt context). */
2998                         adapter->tx_timeout_count++;
2999                         schedule_work(&adapter->reset_task);
3000                         /* return immediately since reset is imminent */
3001                         return;
3002                 }
3003         }
3004
3005         /* Force detection of hung controller every watchdog period */
3006         for (i = 0; i < adapter->num_tx_queues; i++)
3007                 adapter->tx_ring[i].detect_tx_hung = true;
3008
3009         /* Cause software interrupt to ensure rx ring is cleaned */
3010         if (adapter->msix_entries) {
3011                 u32 eics = 0;
3012                 for (i = 0; i < adapter->num_q_vectors; i++) {
3013                         struct igb_q_vector *q_vector = adapter->q_vector[i];
3014                         eics |= q_vector->eims_value;
3015                 }
3016                 wr32(E1000_EICS, eics);
3017         } else {
3018                 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3019         }
3020
3021         /* Reset the timer */
3022         if (!test_bit(__IGB_DOWN, &adapter->state))
3023                 mod_timer(&adapter->watchdog_timer,
3024                           round_jiffies(jiffies + 2 * HZ));
3025 }
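
/*
 * The EICS write above raises one software interrupt per MSI-X vector:
 * each q_vector owns a bit in eics (its eims_value), so with, for
 * example, four vectors owning bits 0-3 the write of 0xf fires all four
 * handlers and guarantees every ring gets a cleanup pass even if a
 * hardware interrupt was missed.
 */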
3026
3027 enum latency_range {
3028         lowest_latency = 0,
3029         low_latency = 1,
3030         bulk_latency = 2,
3031         latency_invalid = 255
3032 };
3033
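/*
 * Illustrative helper (hypothetical, unused by the driver): converts an
 * EITR tick count into an approximate interrupt rate, using the
 * ~0.25 usec tick granularity implied by the pairings in this file
 * (976 <-> 4,000 ints/sec, 196 <-> 20,000, 56 <-> 70,000).
 */
static inline unsigned int igb_itr_ticks_to_ints_per_sec(unsigned int ticks)
{
	/* roughly 4,000,000 quarter-usec ticks in one second */
	return ticks ? 4000000 / ticks : 0;
}
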
3034 /**
3035  * igb_update_ring_itr - update the dynamic ITR value based on packet size
3036  *
3037  *      Stores a new ITR value based strictly on packet size.  This
3038  *      algorithm is less sophisticated than that used in igb_update_itr,
3039  *      due to the difficulty of synchronizing statistics across multiple
3040  *      receive rings.  The divisors and thresholds used by this function
3041  *      were determined based on theoretical maximum wire speed and testing
3042  *      data, in order to minimize response time while increasing bulk
3043  *      throughput.
3044  *      This functionality is controlled by the InterruptThrottleRate module
3045  *      parameter (see igb_param.c)
3046  *      NOTE:  This function is called only when operating in a multiqueue
3047  *             receive environment.
3048  * @q_vector: pointer to q_vector
3049  **/
3050 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3051 {
3052         int new_val = q_vector->itr_val;
3053         int avg_wire_size = 0;
3054         struct igb_adapter *adapter = q_vector->adapter;
3055
3056         /* For non-gigabit speeds, just fix the interrupt rate at 4000
3057          * ints/sec - ITR timer value of 976 (~250 usecs).
3058          */
3059         if (adapter->link_speed != SPEED_1000) {
3060                 new_val = 976;
3061                 goto set_itr_val;
3062         }
3063
3064         if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3065                 struct igb_ring *ring = q_vector->rx_ring;
3066                 avg_wire_size = ring->total_bytes / ring->total_packets;
3067         }
3068
3069         if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3070                 struct igb_ring *ring = q_vector->tx_ring;
3071                 avg_wire_size = max_t(u32, avg_wire_size,
3072                                       (ring->total_bytes /
3073                                        ring->total_packets));
3074         }
3075
3076         /* if avg_wire_size isn't set, no work was done */
3077         if (!avg_wire_size)
3078                 goto clear_counts;
3079
3080         /* Add 24 bytes to size: CRC (4), preamble (8) and inter-frame gap (12) */
3081         avg_wire_size += 24;
3082
3083         /* Don't starve jumbo frames */
3084         avg_wire_size = min(avg_wire_size, 3000);
3085
3086         /* Give a little boost to mid-size frames */
3087         if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3088                 new_val = avg_wire_size / 3;
3089         else
3090                 new_val = avg_wire_size / 2;
3091
3092 set_itr_val:
3093         if (new_val != q_vector->itr_val) {
3094                 q_vector->itr_val = new_val;
3095                 q_vector->set_itr = 1;
3096         }
3097 clear_counts:
3098         if (q_vector->rx_ring) {
3099                 q_vector->rx_ring->total_bytes = 0;
3100                 q_vector->rx_ring->total_packets = 0;
3101         }
3102         if (q_vector->tx_ring) {
3103                 q_vector->tx_ring->total_bytes = 0;
3104                 q_vector->tx_ring->total_packets = 0;
3105         }
3106 }
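
/*
 * Worked example for igb_update_ring_itr(), using the ~0.25 usec/tick
 * granularity implied by the 976 <-> 4,000 ints/sec pairing above: an
 * interval that saw 1000 rx packets totalling 776000 bytes gives
 * avg_wire_size = 776 + 24 = 800.  That lands in the 300..1200
 * "mid-size" band, so new_val = 800 / 3 = 266 ticks, i.e. roughly
 * 66 usec between interrupts (~15,000 ints/sec).  A 64-byte-average
 * workload gives (64 + 24) / 2 = 44 ticks instead (~90,000 ints/sec,
 * favouring latency).
 */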
3107
3108 /**
3109  * igb_update_itr - update the dynamic ITR value based on statistics
3110  *      Stores a new ITR value based on packet and byte
3111  *      counts during the last interrupt.  The advantage of per-interrupt
3112  *      computation is faster updates and more accurate ITR for the current
3113  *      traffic pattern.  Constants in this function were computed
3114  *      based on theoretical maximum wire speed and thresholds were set based
3115  *      on testing data as well as attempting to minimize response time
3116  *      while increasing bulk throughput.
3117  *      This functionality is controlled by the InterruptThrottleRate module
3118  *      parameter (see igb_param.c)
3119  *      NOTE:  These calculations are only valid when operating in a single-
3120  *             queue environment.
3121  * @adapter: pointer to adapter
3122  * @itr_setting: current q_vector->itr_val
3123  * @packets: the number of packets during this measurement interval
3124  * @bytes: the number of bytes during this measurement interval
3125  **/
3126 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3127                                    int packets, int bytes)
3128 {
3129         unsigned int retval = itr_setting;
3130
3131         if (packets == 0)
3132                 goto update_itr_done;
3133
3134         switch (itr_setting) {
3135         case lowest_latency:
3136                 /* handle TSO and jumbo frames */
3137                 if (bytes/packets > 8000)
3138                         retval = bulk_latency;
3139                 else if ((packets < 5) && (bytes > 512))
3140                         retval = low_latency;
3141                 break;
3142         case low_latency:  /* 50 usec aka 20000 ints/s */
3143                 if (bytes > 10000) {
3144                         /* this if handles the TSO accounting */
3145                         if (bytes/packets > 8000) {
3146                                 retval = bulk_latency;
3147                         } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3148                                 retval = bulk_latency;
3149                         } else if (packets > 35) {
3150                                 retval = lowest_latency;
3151                         }
3152                 } else if (bytes/packets > 2000) {
3153                         retval = bulk_latency;
3154                 } else if (packets <= 2 && bytes < 512) {
3155                         retval = lowest_latency;
3156                 }
3157                 break;
3158         case bulk_latency: /* 250 usec aka 4000 ints/s */
3159                 if (bytes > 25000) {
3160                         if (packets > 35)
3161                                 retval = low_latency;
3162                 } else if (bytes < 1500) {
3163                         retval = low_latency;
3164                 }
3165                 break;
3166         }
3167
3168 update_itr_done:
3169         return retval;
3170 }
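
/*
 * Worked example of the state machine above: starting from low_latency
 * with 40 packets / 12000 bytes in the last interval, bytes > 10000 and
 * bytes/packets = 300, so neither bulk test fires and packets > 35
 * promotes the ring to lowest_latency.  The same 40 packets at 9000
 * bytes each (360000 bytes, TSO-sized) gives bytes/packets > 8000 and
 * demotes it to bulk_latency instead.
 */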
3171
3172 static void igb_set_itr(struct igb_adapter *adapter)
3173 {
3174         struct igb_q_vector *q_vector = adapter->q_vector[0];
3175         u16 current_itr;
3176         u32 new_itr = q_vector->itr_val;
3177
3178         /* for non-gigabit speeds, just fix the interrupt rate at 4000 ints/sec */
3179         if (adapter->link_speed != SPEED_1000) {
3180                 current_itr = 0;
3181                 new_itr = 980; /* aka 4,000 ints/sec, as for bulk_latency below */
3182                 goto set_itr_now;
3183         }
3184
3185         adapter->rx_itr = igb_update_itr(adapter,
3186                                     adapter->rx_itr,
3187                                     adapter->rx_ring->total_packets,
3188                                     adapter->rx_ring->total_bytes);
3189
3190         adapter->tx_itr = igb_update_itr(adapter,
3191                                     adapter->tx_itr,
3192                                     adapter->tx_ring->total_packets,
3193                                     adapter->tx_ring->total_bytes);
3194         current_itr = max(adapter->rx_itr, adapter->tx_itr);
3195
3196         /* conservative mode (itr 3) eliminates the lowest_latency setting */
3197         if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
3198                 current_itr = low_latency;
3199
3200         switch (current_itr) {
3201         /* counts and packets in update_itr are dependent on these numbers */
3202         case lowest_latency:
3203                 new_itr = 56;  /* aka 70,000 ints/sec */
3204                 break;
3205         case low_latency:
3206                 new_itr = 196; /* aka 20,000 ints/sec */
3207                 break;
3208         case bulk_latency:
3209                 new_itr = 980; /* aka 4,000 ints/sec */
3210                 break;
3211         default:
3212                 break;
3213         }
3214
3215 set_itr_now:
3216         adapter->rx_ring->total_bytes = 0;
3217         adapter->rx_ring->total_packets = 0;
3218         adapter->tx_ring->total_bytes = 0;
3219         adapter->tx_ring->total_packets = 0;
3220
3221         if (new_itr != q_vector->itr_val) {
3222                 /* this attempts to bias the interrupt rate towards Bulk
3223                  * by adding intermediate steps when interrupt rate is
3224                  * increasing */
3225                 new_itr = new_itr > q_vector->itr_val ?
3226                              max((new_itr * q_vector->itr_val) /
3227                                  (new_itr + (q_vector->itr_val >> 2)),
3228                                  new_itr) :
3229                              new_itr;
3230                 /* Don't write the value here; it resets the adapter's
3231                  * internal timer, and causes us to delay far longer than
3232                  * we should between interrupts.  Instead, we write the ITR
3233                  * value at the beginning of the next interrupt so the timing
3234                  * ends up being correct.
3235                  */
3236                 q_vector->itr_val = new_itr;
3237                 q_vector->set_itr = 1;
3238         }
3239
3240         return;
3241 }
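
/*
 * A note on the damping expression above: for any new_itr > itr_val,
 * (new * old) / (new + old/4) is smaller than new (it only requires
 * new > 3*old/4), so the max() resolves to new_itr and the move toward
 * bulk is taken in a single step.  E.g. old = 196 (20k ints/sec),
 * new = 980 (4k ints/sec): (980 * 196) / (980 + 49) = 186, and
 * max(186, 980) = 980.
 */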
3242
3243 #define IGB_TX_FLAGS_CSUM               0x00000001
3244 #define IGB_TX_FLAGS_VLAN               0x00000002
3245 #define IGB_TX_FLAGS_TSO                0x00000004
3246 #define IGB_TX_FLAGS_IPV4               0x00000008
3247 #define IGB_TX_FLAGS_TSTAMP             0x00000010
3248 #define IGB_TX_FLAGS_VLAN_MASK  0xffff0000
3249 #define IGB_TX_FLAGS_VLAN_SHIFT 16
3250
3251 static inline int igb_tso_adv(struct igb_ring *tx_ring,
3252                               struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3253 {
3254         struct e1000_adv_tx_context_desc *context_desc;
3255         unsigned int i;
3256         int err;
3257         struct igb_buffer *buffer_info;
3258         u32 info = 0, tu_cmd = 0;
3259         u32 mss_l4len_idx, l4len;
3260         *hdr_len = 0;
3261
3262         if (skb_header_cloned(skb)) {
3263                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3264                 if (err)
3265                         return err;
3266         }
3267
3268         l4len = tcp_hdrlen(skb);
3269         *hdr_len += l4len;
3270
3271         if (skb->protocol == htons(ETH_P_IP)) {
3272                 struct iphdr *iph = ip_hdr(skb);
3273                 iph->tot_len = 0;
3274                 iph->check = 0;
3275                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3276                                                          iph->daddr, 0,
3277                                                          IPPROTO_TCP,
3278                                                          0);
3279         } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3280                 ipv6_hdr(skb)->payload_len = 0;
3281                 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3282                                                        &ipv6_hdr(skb)->daddr,
3283                                                        0, IPPROTO_TCP, 0);
3284         }
3285
3286         i = tx_ring->next_to_use;
3287
3288         buffer_info = &tx_ring->buffer_info[i];
3289         context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3290         /* VLAN MACLEN IPLEN */
3291         if (tx_flags & IGB_TX_FLAGS_VLAN)
3292                 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3293         info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3294         *hdr_len += skb_network_offset(skb);
3295         info |= skb_network_header_len(skb);
3296         *hdr_len += skb_network_header_len(skb);
3297         context_desc->vlan_macip_lens = cpu_to_le32(info);
3298
3299         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3300         tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3301
3302         if (skb->protocol == htons(ETH_P_IP))
3303                 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3304         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3305
3306         context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3307
3308         /* MSS L4LEN IDX */
3309         mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3310         mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3311
3312         /* For 82575, context index must be unique per ring. */
3313         if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3314                 mss_l4len_idx |= tx_ring->reg_idx << 4;
3315
3316         context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3317         context_desc->seqnum_seed = 0;
3318
3319         buffer_info->time_stamp = jiffies;
3320         buffer_info->next_to_watch = i;
3321         buffer_info->dma = 0;
3322         i++;
3323         if (i == tx_ring->count)
3324                 i = 0;
3325
3326         tx_ring->next_to_use = i;
3327
3328         return true;
3329 }
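
/*
 * Worked example for the hdr_len produced above, assuming an untagged
 * Ethernet IPv4/TCP frame with no options: tcp_hdrlen (20) + network
 * offset (14) + network header length (20) = 54 bytes.  The caller
 * hands this to igb_tx_queue_adv(), which programs paylen - hdr_len
 * (the TSO payload, excluding the headers the hardware replicates per
 * segment) into olinfo_status.
 */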
3330
3331 static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3332                                    struct sk_buff *skb, u32 tx_flags)
3333 {
3334         struct e1000_adv_tx_context_desc *context_desc;
3335         struct pci_dev *pdev = tx_ring->pdev;
3336         struct igb_buffer *buffer_info;
3337         u32 info = 0, tu_cmd = 0;
3338         unsigned int i;
3339
3340         if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3341             (tx_flags & IGB_TX_FLAGS_VLAN)) {
3342                 i = tx_ring->next_to_use;
3343                 buffer_info = &tx_ring->buffer_info[i];
3344                 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3345
3346                 if (tx_flags & IGB_TX_FLAGS_VLAN)
3347                         info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3348                 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3349                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3350                         info |= skb_network_header_len(skb);
3351
3352                 context_desc->vlan_macip_lens = cpu_to_le32(info);
3353
3354                 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3355
3356                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3357                         __be16 protocol;
3358
3359                         if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3360                                 const struct vlan_ethhdr *vhdr =
3361                                           (const struct vlan_ethhdr*)skb->data;
3362
3363                                 protocol = vhdr->h_vlan_encapsulated_proto;
3364                         } else {
3365                                 protocol = skb->protocol;
3366                         }
3367
3368                         switch (protocol) {
3369                         case cpu_to_be16(ETH_P_IP):
3370                                 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3371                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3372                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3373                                 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3374                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3375                                 break;
3376                         case cpu_to_be16(ETH_P_IPV6):
3377                                 /* XXX what about other V6 headers?? */
3378                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3379                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3380                                 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3381                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3382                                 break;
3383                         default:
3384                                 if (unlikely(net_ratelimit()))
3385                                         dev_warn(&pdev->dev,
3386                                             "partial checksum but proto=%x!\n",
3387                                             skb->protocol);
3388                                 break;
3389                         }
3390                 }
3391
3392                 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3393                 context_desc->seqnum_seed = 0;
3394                 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3395                         context_desc->mss_l4len_idx =
3396                                 cpu_to_le32(tx_ring->reg_idx << 4);
3397
3398                 buffer_info->time_stamp = jiffies;
3399                 buffer_info->next_to_watch = i;
3400                 buffer_info->dma = 0;
3401
3402                 i++;
3403                 if (i == tx_ring->count)
3404                         i = 0;
3405                 tx_ring->next_to_use = i;
3406
3407                 return true;
3408         }
3409         return false;
3410 }
3411
3412 #define IGB_MAX_TXD_PWR 16
3413 #define IGB_MAX_DATA_PER_TXD    (1<<IGB_MAX_TXD_PWR)
3414
3415 static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3416                                  unsigned int first)
3417 {
3418         struct igb_buffer *buffer_info;
3419         struct pci_dev *pdev = tx_ring->pdev;
3420         unsigned int len = skb_headlen(skb);
3421         unsigned int count = 0, i;
3422         unsigned int f;
3423         dma_addr_t *map;
3424
3425         i = tx_ring->next_to_use;
3426
3427         if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3428                 dev_err(&pdev->dev, "TX DMA map failed\n");
3429                 return 0;
3430         }
3431
3432         map = skb_shinfo(skb)->dma_maps;
3433
3434         buffer_info = &tx_ring->buffer_info[i];
3435         BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3436         buffer_info->length = len;
3437         /* set time_stamp *before* dma to help avoid a possible race */
3438         buffer_info->time_stamp = jiffies;
3439         buffer_info->next_to_watch = i;
3440         buffer_info->dma = skb_shinfo(skb)->dma_head;
3441
3442         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3443                 struct skb_frag_struct *frag;
3444
3445                 i++;
3446                 if (i == tx_ring->count)
3447                         i = 0;
3448
3449                 frag = &skb_shinfo(skb)->frags[f];
3450                 len = frag->size;
3451
3452                 buffer_info = &tx_ring->buffer_info[i];
3453                 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3454                 buffer_info->length = len;
3455                 buffer_info->time_stamp = jiffies;
3456                 buffer_info->next_to_watch = i;
3457                 buffer_info->dma = map[count];
3458                 count++;
3459         }
3460
3461         tx_ring->buffer_info[i].skb = skb;
3462         tx_ring->buffer_info[first].next_to_watch = i;
3463
3464         return count + 1;
3465 }
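
/*
 * Descriptor accounting above: one buffer_info slot for skb->data plus
 * one per page fragment, so a 3-fragment skb returns count = 4.  The
 * skb pointer is stored only in the last slot, and
 * buffer_info[first].next_to_watch records that last index so the
 * cleanup path knows which descriptor marks the end of the packet.
 */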
3466
3467 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3468                                     int tx_flags, int count, u32 paylen,
3469                                     u8 hdr_len)
3470 {
3471         union e1000_adv_tx_desc *tx_desc = NULL;
3472         struct igb_buffer *buffer_info;
3473         u32 olinfo_status = 0, cmd_type_len;
3474         unsigned int i;
3475
3476         cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3477                         E1000_ADVTXD_DCMD_DEXT);
3478
3479         if (tx_flags & IGB_TX_FLAGS_VLAN)
3480                 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3481
3482         if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3483                 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3484
3485         if (tx_flags & IGB_TX_FLAGS_TSO) {
3486                 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3487
3488                 /* insert tcp checksum */
3489                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3490
3491                 /* insert ip checksum */
3492                 if (tx_flags & IGB_TX_FLAGS_IPV4)
3493                         olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3494
3495         } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3496                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3497         }
3498
3499         if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3500             (tx_flags & (IGB_TX_FLAGS_CSUM |
3501                          IGB_TX_FLAGS_TSO |
3502                          IGB_TX_FLAGS_VLAN)))
3503                 olinfo_status |= tx_ring->reg_idx << 4;
3504
3505         olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3506
3507         i = tx_ring->next_to_use;
3508         while (count--) {
3509                 buffer_info = &tx_ring->buffer_info[i];
3510                 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3511                 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3512                 tx_desc->read.cmd_type_len =
3513                         cpu_to_le32(cmd_type_len | buffer_info->length);
3514                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3515                 i++;
3516                 if (i == tx_ring->count)
3517                         i = 0;
3518         }
3519
3520         tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3521         /* Force memory writes to complete before letting h/w
3522          * know there are new descriptors to fetch.  (Only
3523          * applicable for weak-ordered memory model archs,
3524          * such as IA-64). */
3525         wmb();
3526
3527         tx_ring->next_to_use = i;
3528         writel(i, tx_ring->tail);
3529         /* we need this if more than one processor can write to our tail
3530          * at a time; it synchronizes IO on IA64/Altix systems */
3531         mmiowb();
3532 }
3533
3534 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3535 {
3536         struct net_device *netdev = tx_ring->netdev;
3537
3538         netif_stop_subqueue(netdev, tx_ring->queue_index);
3539
3540         /* Herbert's original patch had:
3541          *  smp_mb__after_netif_stop_queue();
3542          * but since that doesn't exist yet, just open code it. */
3543         smp_mb();
3544
3545         /* We need to check again in case another CPU has just
3546          * made room available. */
3547         if (igb_desc_unused(tx_ring) < size)
3548                 return -EBUSY;
3549
3550         /* A reprieve! */
3551         netif_wake_subqueue(netdev, tx_ring->queue_index);
3552         tx_ring->tx_stats.restart_queue++;
3553         return 0;
3554 }
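
/*
 * The re-check after netif_stop_subqueue() closes a race with tx
 * cleanup running on another CPU: if descriptors are freed (and any
 * wake issued) between the caller's space check and the stop, the
 * queue could otherwise stay stopped with room available.  The
 * smp_mb() orders the stop against the re-read of the ring state.
 */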
3555
3556 static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3557 {
3558         if (igb_desc_unused(tx_ring) >= size)
3559                 return 0;
3560         return __igb_maybe_stop_tx(tx_ring, size);
3561 }
3562
3563 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3564                                     struct igb_ring *tx_ring)
3565 {
3566         struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3567         unsigned int first;
3568         unsigned int tx_flags = 0;
3569         u8 hdr_len = 0;
3570         int count = 0;
3571         int tso = 0;
3572         union skb_shared_tx *shtx = skb_tx(skb);
3573
3574         /* need: 1 descriptor per page,
3575          *       + 2 desc gap to keep tail from touching head,
3576          *       + 1 desc for skb->data,
3577          *       + 1 desc for context descriptor,
3578          * otherwise try next time */
3579         if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3580                 /* this is a hard error */
3581                 return NETDEV_TX_BUSY;
3582         }
3583
3584         if (unlikely(shtx->hardware)) {
3585                 shtx->in_progress = 1;
3586                 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3587         }
3588
3589         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3590                 tx_flags |= IGB_TX_FLAGS_VLAN;
3591                 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3592         }
3593
3594         if (skb->protocol == htons(ETH_P_IP))
3595                 tx_flags |= IGB_TX_FLAGS_IPV4;
3596
3597         first = tx_ring->next_to_use;
3598         if (skb_is_gso(skb)) {
3599                 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3600                 if (tso < 0) {
3601                         dev_kfree_skb_any(skb);
3602                         return NETDEV_TX_OK;
3603                 }
3604         }
3605
3606         if (tso)
3607                 tx_flags |= IGB_TX_FLAGS_TSO;
3608         else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3609                  (skb->ip_summed == CHECKSUM_PARTIAL))
3610                 tx_flags |= IGB_TX_FLAGS_CSUM;
3611
3612         /*
3613          * count reflects descriptors mapped; if 0 then a mapping error
3614          * has occurred and we need to rewind the descriptor queue
3615          */
3616         count = igb_tx_map_adv(tx_ring, skb, first);
3617
3618         if (!count) {
3619                 dev_kfree_skb_any(skb);
3620                 tx_ring->buffer_info[first].time_stamp = 0;
3621                 tx_ring->next_to_use = first;
3622                 return NETDEV_TX_OK;
3623         }
3624
3625         igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3626
3627         /* Make sure there is space in the ring for the next send. */
3628         igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3629
3630         return NETDEV_TX_OK;
3631 }
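
/*
 * Worked example for the budget checked at the top of
 * igb_xmit_frame_ring_adv(): a TSO skb carrying 3 page fragments may
 * need 3 (fragments) + 1 (skb->data) + 1 (context descriptor) +
 * 2 (gap so the tail never touches the head) = 7 free descriptors,
 * hence the nr_frags + 4 bound.
 */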
3632
3633 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3634                                       struct net_device *netdev)
3635 {
3636         struct igb_adapter *adapter = netdev_priv(netdev);
3637         struct igb_ring *tx_ring;
3638         int r_idx = 0;
3639
3640         if (test_bit(__IGB_DOWN, &adapter->state)) {
3641                 dev_kfree_skb_any(skb);
3642                 return NETDEV_TX_OK;
3643         }
3644
3645         if (skb->len <= 0) {
3646                 dev_kfree_skb_any(skb);
3647                 return NETDEV_TX_OK;
3648         }
3649
3650         r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3651         tx_ring = adapter->multi_tx_table[r_idx];
3652
3653         /* This goes back to the question of how to logically map a tx queue
3654          * to a flow.  Right now, performance is slightly degraded when
3655          * using multiple tx queues.  If the stack breaks away from a
3656          * single qdisc implementation, we can look at this again. */
3657         return igb_xmit_frame_ring_adv(skb, tx_ring);
3658 }
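
/*
 * The queue selection above relies on IGB_ABS_MAX_TX_QUEUES being a
 * power of two, so the AND acts as a cheap modulo: with a value of 8,
 * for example, skb->queue_mapping = 11 selects multi_tx_table[3].
 */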
3659
3660 /**
3661  * igb_tx_timeout - Respond to a Tx Hang
3662  * @netdev: network interface device structure
3663  **/
3664 static void igb_tx_timeout(struct net_device *netdev)
3665 {
3666         struct igb_adapter *adapter = netdev_priv(netdev);
3667         struct e1000_hw *hw = &adapter->hw;
3668
3669         /* Do the reset outside of interrupt context */
3670         adapter->tx_timeout_count++;
3671
3672         schedule_work(&adapter->reset_task);
3673         wr32(E1000_EICS,
3674              (adapter->eims_enable_mask & ~adapter->eims_other));
3675 }
3676
3677 static void igb_reset_task(struct work_struct *work)
3678 {
3679         struct igb_adapter *adapter;
3680         adapter = container_of(work, struct igb_adapter, reset_task);
3681
3682         igb_reinit_locked(adapter);
3683 }
3684
3685 /**
3686  * igb_get_stats - Get System Network Statistics
3687  * @netdev: network interface device structure
3688  *
3689  * Returns the address of the device statistics structure.
3690  * The statistics are actually updated from the timer callback.
3691  **/
3692 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3693 {
3694         /* only return the current stats */
3695         return &netdev->stats;
3696 }
3697
3698 /**
3699  * igb_change_mtu - Change the Maximum Transfer Unit
3700  * @netdev: network interface device structure
3701  * @new_mtu: new value for maximum frame size
3702  *
3703  * Returns 0 on success, negative on failure
3704  **/
3705 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3706 {
3707         struct igb_adapter *adapter = netdev_priv(netdev);
3708         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3709         u32 rx_buffer_len, i;
3710
3711         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3712             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3713                 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3714                 return -EINVAL;
3715         }
3716
3717         if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3718                 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3719                 return -EINVAL;
3720         }
3721
3722         while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3723                 msleep(1);
3724
3725         /* igb_down has a dependency on max_frame_size */
3726         adapter->max_frame_size = max_frame;
3727         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3728          * means we reserve 2 more; this pushes us to allocate from the next
3729          * larger slab size.
3730          * i.e. RXBUFFER_2048 --> size-4096 slab
3731          */
3732
3733         if (max_frame <= IGB_RXBUFFER_1024)
3734                 rx_buffer_len = IGB_RXBUFFER_1024;
3735         else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3736                 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3737         else
3738                 rx_buffer_len = IGB_RXBUFFER_128;
3739
3740         if (netif_running(netdev))
3741                 igb_down(adapter);
3742
3743         dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3744                  netdev->mtu, new_mtu);
3745         netdev->mtu = new_mtu;
3746
3747         for (i = 0; i < adapter->num_rx_queues; i++)
3748                 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3749
3750         if (netif_running(netdev))
3751                 igb_up(adapter);
3752         else
3753                 igb_reset(adapter);
3754
3755         clear_bit(__IGB_RESETTING, &adapter->state);
3756
3757         return 0;
3758 }
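
/*
 * Buffer sizing examples for the logic above: the default 1500-byte
 * MTU gives max_frame = 1500 + 14 + 4 = 1518, which is above the
 * 1024-byte buffer but within MAXIMUM_ETHERNET_VLAN_SIZE, so
 * rx_buffer_len becomes 1522.  A 9000-byte jumbo MTU gives
 * max_frame = 9018 and falls through to the 128-byte buffer, which
 * holds only the header while the rest of the frame is expected to
 * land in page buffers.
 */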
3759
3760 /**
3761  * igb_update_stats - Update the board statistics counters
3762  * @adapter: board private structure
3763  **/
3764
3765 void igb_update_stats(struct igb_adapter *adapter)
3766 {
3767         struct net_device *netdev = adapter->netdev;
3768         struct e1000_hw *hw = &adapter->hw;
3769         struct pci_dev *pdev = adapter->pdev;
3770         u32 rnbc;
3771         u16 phy_tmp;
3772         int i;
3773         u64 bytes, packets;
3774
3775 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3776
3777         /*
3778          * Prevent stats update while adapter is being reset, or if the pci
3779          * connection is down.
3780          */
3781         if (adapter->link_speed == 0)
3782                 return;
3783         if (pci_channel_offline(pdev))
3784                 return;
3785
3786         bytes = 0;
3787         packets = 0;
3788         for (i = 0; i < adapter->num_rx_queues; i++) {
3789                 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3790                 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3791                 netdev->stats.rx_fifo_errors += rqdpc_tmp;
3792                 bytes += adapter->rx_ring[i].rx_stats.bytes;
3793                 packets += adapter->rx_ring[i].rx_stats.packets;
3794         }
3795
3796         netdev->stats.rx_bytes = bytes;
3797         netdev->stats.rx_packets = packets;
3798
3799         bytes = 0;
3800         packets = 0;
3801         for (i = 0; i < adapter->num_tx_queues; i++) {
3802                 bytes += adapter->tx_ring[i].tx_stats.bytes;
3803                 packets += adapter->tx_ring[i].tx_stats.packets;
3804         }
3805         netdev->stats.tx_bytes = bytes;
3806         netdev->stats.tx_packets = packets;
3807
3808         /* read stats registers */
3809         adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3810         adapter->stats.gprc += rd32(E1000_GPRC);
3811         adapter->stats.gorc += rd32(E1000_GORCL);
3812         rd32(E1000_GORCH); /* clear GORCL */
3813         adapter->stats.bprc += rd32(E1000_BPRC);
3814         adapter->stats.mprc += rd32(E1000_MPRC);
3815         adapter->stats.roc += rd32(E1000_ROC);
3816
3817         adapter->stats.prc64 += rd32(E1000_PRC64);
3818         adapter->stats.prc127 += rd32(E1000_PRC127);
3819         adapter->stats.prc255 += rd32(E1000_PRC255);
3820         adapter->stats.prc511 += rd32(E1000_PRC511);
3821         adapter->stats.prc1023 += rd32(E1000_PRC1023);
3822         adapter->stats.prc1522 += rd32(E1000_PRC1522);
3823         adapter->stats.symerrs += rd32(E1000_SYMERRS);
3824         adapter->stats.sec += rd32(E1000_SEC);
3825
3826         adapter->stats.mpc += rd32(E1000_MPC);
3827         adapter->stats.scc += rd32(E1000_SCC);
3828         adapter->stats.ecol += rd32(E1000_ECOL);
3829         adapter->stats.mcc += rd32(E1000_MCC);
3830         adapter->stats.latecol += rd32(E1000_LATECOL);
3831         adapter->stats.dc += rd32(E1000_DC);
3832         adapter->stats.rlec += rd32(E1000_RLEC);
3833         adapter->stats.xonrxc += rd32(E1000_XONRXC);
3834         adapter->stats.xontxc += rd32(E1000_XONTXC);
3835         adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3836         adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3837         adapter->stats.fcruc += rd32(E1000_FCRUC);
3838         adapter->stats.gptc += rd32(E1000_GPTC);
3839         adapter->stats.gotc += rd32(E1000_GOTCL);
3840         rd32(E1000_GOTCH); /* clear GOTCL */
3841         rnbc = rd32(E1000_RNBC);
3842         adapter->stats.rnbc += rnbc;
3843         netdev->stats.rx_fifo_errors += rnbc;
3844         adapter->stats.ruc += rd32(E1000_RUC);
3845         adapter->stats.rfc += rd32(E1000_RFC);
3846         adapter->stats.rjc += rd32(E1000_RJC);
3847         adapter->stats.tor += rd32(E1000_TORH);
3848         adapter->stats.tot += rd32(E1000_TOTH);
3849         adapter->stats.tpr += rd32(E1000_TPR);
3850
3851         adapter->stats.ptc64 += rd32(E1000_PTC64);
3852         adapter->stats.ptc127 += rd32(E1000_PTC127);
3853         adapter->stats.ptc255 += rd32(E1000_PTC255);
3854         adapter->stats.ptc511 += rd32(E1000_PTC511);
3855         adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3856         adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3857
3858         adapter->stats.mptc += rd32(E1000_MPTC);
3859         adapter->stats.bptc += rd32(E1000_BPTC);
3860
3861         /* used for adaptive IFS */
3862
3863         hw->mac.tx_packet_delta = rd32(E1000_TPT);
3864         adapter->stats.tpt += hw->mac.tx_packet_delta;
3865         hw->mac.collision_delta = rd32(E1000_COLC);
3866         adapter->stats.colc += hw->mac.collision_delta;
3867
3868         adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3869         adapter->stats.rxerrc += rd32(E1000_RXERRC);
3870         adapter->stats.tncrs += rd32(E1000_TNCRS);
3871         adapter->stats.tsctc += rd32(E1000_TSCTC);
3872         adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3873
3874         adapter->stats.iac += rd32(E1000_IAC);
3875         adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3876         adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3877         adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3878         adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3879         adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3880         adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3881         adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3882         adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3883
3884         /* Fill out the OS statistics structure */
3885         netdev->stats.multicast = adapter->stats.mprc;
3886         netdev->stats.collisions = adapter->stats.colc;
3887
3888         /* Rx Errors */
3889
3890         /* RLEC on some newer hardware can be incorrect so build
3891          * our own version based on RUC and ROC */
3892         netdev->stats.rx_errors = adapter->stats.rxerrc +
3893                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3894                 adapter->stats.ruc + adapter->stats.roc +
3895                 adapter->stats.cexterr;
3896         netdev->stats.rx_length_errors = adapter->stats.ruc +
3897                                               adapter->stats.roc;
3898         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3899         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3900         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3901
3902         /* Tx Errors */
3903         netdev->stats.tx_errors = adapter->stats.ecol +
3904                                        adapter->stats.latecol;
3905         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3906         netdev->stats.tx_window_errors = adapter->stats.latecol;
3907         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3908
3909         /* Tx Dropped needs to be maintained elsewhere */
3910
3911         /* Phy Stats */
3912         if (hw->phy.media_type == e1000_media_type_copper) {
3913                 if ((adapter->link_speed == SPEED_1000) &&
3914                    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3915                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3916                         adapter->phy_stats.idle_errors += phy_tmp;
3917                 }
3918         }
3919
3920         /* Management Stats */
3921         adapter->stats.mgptc += rd32(E1000_MGTPTC);
3922         adapter->stats.mgprc += rd32(E1000_MGTPRC);
3923         adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3924 }
3925
3926 static irqreturn_t igb_msix_other(int irq, void *data)
3927 {
3928         struct igb_adapter *adapter = data;
3929         struct e1000_hw *hw = &adapter->hw;
3930         u32 icr = rd32(E1000_ICR);
3931         /* reading ICR causes bit 31 of EICR to be cleared */
3932
3933         if (icr & E1000_ICR_DOUTSYNC) {
3934                 /* HW is reporting DMA is out of sync */
3935                 adapter->stats.doosync++;
3936         }
3937
3938         /* Check for a mailbox event */
3939         if (icr & E1000_ICR_VMMB)
3940                 igb_msg_task(adapter);
3941
3942         if (icr & E1000_ICR_LSC) {
3943                 hw->mac.get_link_status = 1;
3944                 /* guard against interrupt when we're going down */
3945                 if (!test_bit(__IGB_DOWN, &adapter->state))
3946                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3947         }
3948
3949         wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
3950         wr32(E1000_EIMS, adapter->eims_other);
3951
3952         return IRQ_HANDLED;
3953 }
3954
3955 static void igb_write_itr(struct igb_q_vector *q_vector)
3956 {
3957         u32 itr_val = q_vector->itr_val & 0x7FFC;
3958
3959         if (!q_vector->set_itr)
3960                 return;
3961
3962         if (!itr_val)
3963                 itr_val = 0x4;
3964
3965         if (q_vector->itr_shift)
3966                 itr_val |= itr_val << q_vector->itr_shift;
3967         else
3968                 itr_val |= 0x8000000;
3969
3970         writel(itr_val, q_vector->itr_register);
3971         q_vector->set_itr = 0;
3972 }
3973
3974 static irqreturn_t igb_msix_ring(int irq, void *data)
3975 {
3976         struct igb_q_vector *q_vector = data;
3977
3978         /* Write the ITR value calculated from the previous interrupt. */
3979         igb_write_itr(q_vector);
3980
3981         napi_schedule(&q_vector->napi);
3982
3983         return IRQ_HANDLED;
3984 }
3985
3986 #ifdef CONFIG_IGB_DCA
3987 static void igb_update_dca(struct igb_q_vector *q_vector)
3988 {
3989         struct igb_adapter *adapter = q_vector->adapter;
3990         struct e1000_hw *hw = &adapter->hw;
3991         int cpu = get_cpu();
3992
3993         if (q_vector->cpu == cpu)
3994                 goto out_no_update;
3995
3996         if (q_vector->tx_ring) {
3997                 int q = q_vector->tx_ring->reg_idx;
3998                 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3999                 if (hw->mac.type == e1000_82575) {
4000                         dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4001                         dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4002                 } else {
4003                         dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4004                         dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4005                                       E1000_DCA_TXCTRL_CPUID_SHIFT;
4006                 }
4007                 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4008                 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4009         }
4010         if (q_vector->rx_ring) {
4011                 int q = q_vector->rx_ring->reg_idx;
4012                 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4013                 if (hw->mac.type == e1000_82575) {
4014                         dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4015                         dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4016                 } else {
4017                         dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4018                         dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4019                                       E1000_DCA_RXCTRL_CPUID_SHIFT;
4020                 }
4021                 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4022                 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4023                 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4024                 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4025         }
4026         q_vector->cpu = cpu;
4027 out_no_update:
4028         put_cpu();
4029 }
4030
4031 static void igb_setup_dca(struct igb_adapter *adapter)
4032 {
4033         struct e1000_hw *hw = &adapter->hw;
4034         int i;
4035
4036         if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
4037                 return;
4038
4039         /* Always use CB2 mode, difference is masked in the CB driver. */
4040         wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4041
4042         for (i = 0; i < adapter->num_q_vectors; i++) {
4043                 struct igb_q_vector *q_vector = adapter->q_vector[i];
4044                 q_vector->cpu = -1;
4045                 igb_update_dca(q_vector);
4046         }
4047 }
4048
4049 static int __igb_notify_dca(struct device *dev, void *data)
4050 {
4051         struct net_device *netdev = dev_get_drvdata(dev);
4052         struct igb_adapter *adapter = netdev_priv(netdev);
4053         struct e1000_hw *hw = &adapter->hw;
4054         unsigned long event = *(unsigned long *)data;
4055
4056         switch (event) {
4057         case DCA_PROVIDER_ADD:
4058                 /* if already enabled, don't do it again */
4059                 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
4060                         break;
4061                 /* Always use CB2 mode, difference is masked
4062                  * in the CB driver. */
4063                 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4064                 if (dca_add_requester(dev) == 0) {
4065                         adapter->flags |= IGB_FLAG_DCA_ENABLED;
4066                         dev_info(&adapter->pdev->dev, "DCA enabled\n");
4067                         igb_setup_dca(adapter);
4068                         break;
4069                 }
4070                 /* Fall Through since DCA is disabled. */
4071         case DCA_PROVIDER_REMOVE:
4072                 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
4073                         /* without this a class_device is left
4074                          * hanging around in the sysfs model */
4075                         dca_remove_requester(dev);
4076                         dev_info(&adapter->pdev->dev, "DCA disabled\n");
4077                         adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
4078                         wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
4079                 }
4080                 break;
4081         }
4082
4083         return 0;
4084 }
4085
4086 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4087                           void *p)
4088 {
4089         int ret_val;
4090
4091         ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4092                                          __igb_notify_dca);
4093
4094         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4095 }
4096 #endif /* CONFIG_IGB_DCA */
4097
4098 static void igb_ping_all_vfs(struct igb_adapter *adapter)
4099 {
4100         struct e1000_hw *hw = &adapter->hw;
4101         u32 ping;
4102         int i;
4103
4104         for (i = 0; i < adapter->vfs_allocated_count; i++) {
4105                 ping = E1000_PF_CONTROL_MSG;
4106                 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4107                         ping |= E1000_VT_MSGTYPE_CTS;
4108                 igb_write_mbx(hw, &ping, 1, i);
4109         }
4110 }
4111
4112 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4113 {
4114         struct e1000_hw *hw = &adapter->hw;
4115         u32 vmolr = rd32(E1000_VMOLR(vf));
4116         struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4117
4118         vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4119                             IGB_VF_FLAG_MULTI_PROMISC);
4120         vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4121
4122         if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4123                 vmolr |= E1000_VMOLR_MPME;
4124                 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4125         } else {
4126                 /*
4127                  * if we have hashes and we are clearing a multicast promisc
4128                  * flag we need to write the hashes to the MTA as this step
4129                  * was previously skipped
4130                  */
4131                 if (vf_data->num_vf_mc_hashes > 30) {
4132                         vmolr |= E1000_VMOLR_MPME;
4133                 } else if (vf_data->num_vf_mc_hashes) {
4134                         int j;
4135                         vmolr |= E1000_VMOLR_ROMPE;
4136                         for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4137                                 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4138                 }
4139         }
4140
4141         wr32(E1000_VMOLR(vf), vmolr);
4142
4143         /* if any flags are left unprocessed, they are likely not supported */
4144         if (*msgbuf & E1000_VT_MSGINFO_MASK)
4145                 return -EINVAL;
4146
4147         return 0;
4148
4149 }
4150
4151 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4152                                   u32 *msgbuf, u32 vf)
4153 {
4154         int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4155         u16 *hash_list = (u16 *)&msgbuf[1];
4156         struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4157         int i;
4158
4159         /* salt away the number of multicast addresses assigned
4160          * to this VF so they can be restored when the PF multicast
4161          * list changes
4162          */
4163         vf_data->num_vf_mc_hashes = n;
4164
4165         /* only up to 30 hash values supported */
4166         if (n > 30)
4167                 n = 30;
4168
4169         /* store the hashes for later use */
4170         for (i = 0; i < n; i++)
4171                 vf_data->vf_mc_hashes[i] = hash_list[i];
4172
4173         /* Flush and reset the mta with the new values */
4174         igb_set_rx_mode(adapter->netdev);
4175
4176         return 0;
4177 }
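
/*
 * Mailbox layout assumed above: the hash count sits in the
 * E1000_VT_MSGINFO field of msgbuf[0] and the 16-bit hash values
 * follow from msgbuf[1] onward.  Note that the unclamped count is
 * what gets stored: a VF reporting more than 30 hashes is later
 * handled by igb_restore_vf_multicasts()/igb_set_vf_promisc() by
 * enabling multicast promiscuous (MPME) instead of programming the
 * MTA.
 */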
4178
4179 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4180 {
4181         struct e1000_hw *hw = &adapter->hw;
4182         struct vf_data_storage *vf_data;
4183         int i, j;
4184
4185         for (i = 0; i < adapter->vfs_allocated_count; i++) {
4186                 u32 vmolr = rd32(E1000_VMOLR(i));
4187                 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4188
4189                 vf_data = &adapter->vf_data[i];
4190
4191                 if ((vf_data->num_vf_mc_hashes > 30) ||
4192                     (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4193                         vmolr |= E1000_VMOLR_MPME;
4194                 } else if (vf_data->num_vf_mc_hashes) {
4195                         vmolr |= E1000_VMOLR_ROMPE;
4196                         for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4197                                 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4198                 }
4199                 wr32(E1000_VMOLR(i), vmolr);
4200         }
4201 }
4202
4203 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4204 {
4205         struct e1000_hw *hw = &adapter->hw;
4206         u32 pool_mask, reg, vid;
4207         int i;
4208
4209         pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4210
4211         /* Find the vlan filter for this id */
4212         for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4213                 reg = rd32(E1000_VLVF(i));
4214
4215                 /* remove the vf from the pool */
4216                 reg &= ~pool_mask;
4217
4218                 /* if pool is empty then remove entry from vfta */
4219                 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4220                     (reg & E1000_VLVF_VLANID_ENABLE)) {
4221                         vid = reg & E1000_VLVF_VLANID_MASK;
4222                         reg = 0;
4223                         igb_vfta_set(hw, vid, false);
4224                 }
4225
4226                 wr32(E1000_VLVF(i), reg);
4227         }
4228
4229         adapter->vf_data[vf].vlans_enabled = 0;
4230 }
4231
4232 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4233 {
4234         struct e1000_hw *hw = &adapter->hw;
4235         u32 reg, i;
4236
4237         /* The vlvf table only exists on 82576 hardware and newer */
4238         if (hw->mac.type < e1000_82576)
4239                 return -1;
4240
4241         /* we only need to do this if VMDq is enabled */
4242         if (!adapter->vfs_allocated_count)
4243                 return -1;
4244
4245         /* Find the vlan filter for this id */
4246         for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4247                 reg = rd32(E1000_VLVF(i));
4248                 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4249                     vid == (reg & E1000_VLVF_VLANID_MASK))
4250                         break;
4251         }
4252
4253         if (add) {
4254                 if (i == E1000_VLVF_ARRAY_SIZE) {
4255                         /* Did not find a matching VLAN ID entry that was
4256                          * enabled.  Search for a free filter entry, i.e.
4257                          * one without the enable bit set
4258                          */
4259                         for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4260                                 reg = rd32(E1000_VLVF(i));
4261                                 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4262                                         break;
4263                         }
4264                 }
4265                 if (i < E1000_VLVF_ARRAY_SIZE) {
4266                         /* Found an enabled/available entry */
4267                         reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4268
4269                         /* if !enabled we need to set this up in vfta */
4270                         if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4271                                 /* add VID to filter table */
4272                                 igb_vfta_set(hw, vid, true);
4273                                 reg |= E1000_VLVF_VLANID_ENABLE;
4274                         }
4275                         reg &= ~E1000_VLVF_VLANID_MASK;
4276                         reg |= vid;
4277                         wr32(E1000_VLVF(i), reg);
4278
4279                         /* do not modify RLPML for PF devices */
4280                         if (vf >= adapter->vfs_allocated_count)
4281                                 return 0;
4282
4283                         if (!adapter->vf_data[vf].vlans_enabled) {
4284                                 u32 size;
4285                                 reg = rd32(E1000_VMOLR(vf));
4286                                 size = reg & E1000_VMOLR_RLPML_MASK;
4287                                 size += 4;
4288                                 reg &= ~E1000_VMOLR_RLPML_MASK;
4289                                 reg |= size;
4290                                 wr32(E1000_VMOLR(vf), reg);
4291                         }
4292
4293                         adapter->vf_data[vf].vlans_enabled++;
4294                         return 0;
4295                 }
4296         } else {
4297                 if (i < E1000_VLVF_ARRAY_SIZE) {
4298                         /* remove vf from the pool */
4299                         reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4300                         /* if pool is empty then remove entry from vfta */
4301                         if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4302                                 reg = 0;
4303                                 igb_vfta_set(hw, vid, false);
4304                         }
4305                         wr32(E1000_VLVF(i), reg);
4306
4307                         /* do not modify RLPML for PF devices */
4308                         if (vf >= adapter->vfs_allocated_count)
4309                                 return 0;
4310
4311                         adapter->vf_data[vf].vlans_enabled--;
4312                         if (!adapter->vf_data[vf].vlans_enabled) {
4313                                 u32 size;
4314                                 reg = rd32(E1000_VMOLR(vf));
4315                                 size = reg & E1000_VMOLR_RLPML_MASK;
4316                                 size -= 4;
4317                                 reg &= ~E1000_VMOLR_RLPML_MASK;
4318                                 reg |= size;
4319                                 wr32(E1000_VMOLR(vf), reg);
4320                         }
4321                         return 0;
4322                 }
4323         }
4324         return -1;
4325 }
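
/*
 * Each VLVF entry is shared by every pool interested in one VLAN ID:
 * the low bits hold the VLAN ID, E1000_VLVF_VLANID_ENABLE marks the
 * entry in use, and one pool-select bit per pool (VFs first, PF last)
 * starting at E1000_VLVF_POOLSEL_SHIFT records membership.  A minimal
 * sketch of a membership test under those assumptions (illustrative
 * only, not a helper in this driver):
 *
 *	static bool vlvf_entry_has_pool(u32 reg, u32 pool)
 *	{
 *		return reg & (1 << (E1000_VLVF_POOLSEL_SHIFT + pool));
 *	}
 */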
4326
4327 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4328 {
4329         int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4330         int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4331
4332         return igb_vlvf_set(adapter, vid, add, vf);
4333 }
4334
4335 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4336 {
4337         /* clear all flags */
4338         adapter->vf_data[vf].flags = 0;
4339         adapter->vf_data[vf].last_nack = jiffies;
4340
4341         /* reset offloads to defaults */
4342         igb_set_vmolr(adapter, vf);
4343
4344         /* reset vlans for device */
4345         igb_clear_vf_vfta(adapter, vf);
4346
4347         /* reset multicast table array for vf */
4348         adapter->vf_data[vf].num_vf_mc_hashes = 0;
4349
4350         /* Flush and reset the mta with the new values */
4351         igb_set_rx_mode(adapter->netdev);
4352 }
4353
4354 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4355 {
4356         unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4357
4358         /* generate a new mac address as we were hotplug removed/added */
4359         random_ether_addr(vf_mac);
4360
4361         /* process remaining reset events */
4362         igb_vf_reset(adapter, vf);
4363 }
4364
4365 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4366 {
4367         struct e1000_hw *hw = &adapter->hw;
4368         unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4369         int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4370         u32 reg, msgbuf[3];
4371         u8 *addr = (u8 *)(&msgbuf[1]);
4372
4373         /* process all the same items cleared in a function level reset */
4374         igb_vf_reset(adapter, vf);
4375
4376         /* set vf mac address */
4377         igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4378
4379         /* enable transmit and receive for vf */
4380         reg = rd32(E1000_VFTE);
4381         wr32(E1000_VFTE, reg | (1 << vf));
4382         reg = rd32(E1000_VFRE);
4383         wr32(E1000_VFRE, reg | (1 << vf));
4384
4385         adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4386
4387         /* reply to reset with ack and vf mac address */
4388         msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4389         memcpy(addr, vf_mac, 6);
4390         igb_write_mbx(hw, msgbuf, 3, vf);
4391 }
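
/*
 * The reset reply above is a three-word mailbox message: msgbuf[0]
 * carries E1000_VF_RESET or'ed with E1000_VT_MSGTYPE_ACK, and the six
 * bytes of the VF MAC address are packed into msgbuf[1] and the low
 * half of msgbuf[2], which is why addr points at &msgbuf[1].
 */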
4392
4393 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4394 {
4395         unsigned char *addr = (unsigned char *)&msg[1];
4396         int err = -1;
4397
4398         if (is_valid_ether_addr(addr))
4399                 err = igb_set_vf_mac(adapter, vf, addr);
4400
4401         return err;
4402 }
4403
4404 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4405 {
4406         struct e1000_hw *hw = &adapter->hw;
4407         struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4408         u32 msg = E1000_VT_MSGTYPE_NACK;
4409
4410         /* if device isn't clear to send it shouldn't be reading either */
4411         if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4412             time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4413                 igb_write_mbx(hw, &msg, 1, vf);
4414                 vf_data->last_nack = jiffies;
4415         }
4416 }
4417
4418 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4419 {
4420         struct pci_dev *pdev = adapter->pdev;
4421         u32 msgbuf[E1000_VFMAILBOX_SIZE];
4422         struct e1000_hw *hw = &adapter->hw;
4423         struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4424         s32 retval;
4425
4426         retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4427
4428         if (retval)
4429                 dev_err(&pdev->dev, "Error receiving message from VF\n");
4430
4431         /* this is a message we already processed, do nothing */
4432         if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4433                 return;
4434
4435         /*
4436          * until the vf completes a reset it should not be
4437          * allowed to start any configuration.
4438          */
4439
4440         if (msgbuf[0] == E1000_VF_RESET) {
4441                 igb_vf_reset_msg(adapter, vf);
4442                 return;
4443         }
4444
4445         if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4446                 msgbuf[0] = E1000_VT_MSGTYPE_NACK;
4447                 if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4448                         igb_write_mbx(hw, msgbuf, 1, vf);
4449                         vf_data->last_nack = jiffies;
4450                 }
4451                 return;
4452         }
4453
4454         switch ((msgbuf[0] & 0xFFFF)) {
4455         case E1000_VF_SET_MAC_ADDR:
4456                 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4457                 break;
4458         case E1000_VF_SET_PROMISC:
4459                 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4460                 break;
4461         case E1000_VF_SET_MULTICAST:
4462                 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4463                 break;
4464         case E1000_VF_SET_LPE:
4465                 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4466                 break;
4467         case E1000_VF_SET_VLAN:
4468                 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4469                 break;
4470         default:
4471                 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4472                 retval = -1;
4473                 break;
4474         }
4475
4476         /* notify the VF of the results of what it sent us */
4477         if (retval)
4478                 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4479         else
4480                 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4481
4482         msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4483
4484         igb_write_mbx(hw, msgbuf, 1, vf);
4485 }
4486
4487 static void igb_msg_task(struct igb_adapter *adapter)
4488 {
4489         struct e1000_hw *hw = &adapter->hw;
4490         u32 vf;
4491
4492         for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4493                 /* process any reset requests */
4494                 if (!igb_check_for_rst(hw, vf))
4495                         igb_vf_reset_event(adapter, vf);
4496
4497                 /* process any messages pending */
4498                 if (!igb_check_for_msg(hw, vf))
4499                         igb_rcv_msg_from_vf(adapter, vf);
4500
4501                 /* process any acks */
4502                 if (!igb_check_for_ack(hw, vf))
4503                         igb_rcv_ack_from_vf(adapter, vf);
4504         }
4505 }
4506
4507 /**
4508  *  igb_set_uta - Set unicast filter table address
4509  *  @adapter: board private structure
4510  *
4511  *  The unicast table address is a register array of 32-bit registers.
4512  *  The table is meant to be used in a way similar to how the MTA is used,
4513  *  however due to certain limitations in the hardware it is necessary to
4514  *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4515  *  enable bit to allow vlan tag stripping when promiscuous mode is enabled.
4516  **/
4517 static void igb_set_uta(struct igb_adapter *adapter)
4518 {
4519         struct e1000_hw *hw = &adapter->hw;
4520         int i;
4521
4522         /* The UTA table only exists on 82576 hardware and newer */
4523         if (hw->mac.type < e1000_82576)
4524                 return;
4525
4526         /* we only need to do this if VMDq is enabled */
4527         if (!adapter->vfs_allocated_count)
4528                 return;
4529
4530         for (i = 0; i < hw->mac.uta_reg_count; i++)
4531                 array_wr32(E1000_UTA, i, ~0);
4532 }
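
/*
 * Writing ~0 to all hw->mac.uta_reg_count 32-bit UTA registers sets
 * every hash bit, so any unicast address produces a table hit; actual
 * acceptance is then gated per pool by the VMOLR ROPE bit, as the
 * comment above describes.
 */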
4533
4534 /**
4535  * igb_intr_msi - Interrupt Handler
4536  * @irq: interrupt number
4537  * @data: pointer to a network interface device structure
4538  **/
4539 static irqreturn_t igb_intr_msi(int irq, void *data)
4540 {
4541         struct igb_adapter *adapter = data;
4542         struct igb_q_vector *q_vector = adapter->q_vector[0];
4543         struct e1000_hw *hw = &adapter->hw;
4544         /* read ICR disables interrupts using IAM */
4545         u32 icr = rd32(E1000_ICR);
4546
4547         igb_write_itr(q_vector);
4548
4549         if (icr & E1000_ICR_DOUTSYNC) {
4550                 /* HW is reporting DMA is out of sync */
4551                 adapter->stats.doosync++;
4552         }
4553
4554         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4555                 hw->mac.get_link_status = 1;
4556                 if (!test_bit(__IGB_DOWN, &adapter->state))
4557                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
4558         }
4559
4560         napi_schedule(&q_vector->napi);
4561
4562         return IRQ_HANDLED;
4563 }
4564
4565 /**
4566  * igb_intr - Legacy Interrupt Handler
4567  * @irq: interrupt number
4568  * @data: pointer to a network interface device structure
4569  **/
4570 static irqreturn_t igb_intr(int irq, void *data)
4571 {
4572         struct igb_adapter *adapter = data;
4573         struct igb_q_vector *q_vector = adapter->q_vector[0];
4574         struct e1000_hw *hw = &adapter->hw;
4575         /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
4576          * need for the IMC write */
4577         u32 icr = rd32(E1000_ICR);
4578         if (!icr)
4579                 return IRQ_NONE;  /* Not our interrupt */
4580
4581         igb_write_itr(q_vector);
4582
4583         /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4584          * not set, then the adapter didn't send an interrupt */
4585         if (!(icr & E1000_ICR_INT_ASSERTED))
4586                 return IRQ_NONE;
4587
4588         if (icr & E1000_ICR_DOUTSYNC) {
4589                 /* HW is reporting DMA is out of sync */
4590                 adapter->stats.doosync++;
4591         }
4592
4593         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4594                 hw->mac.get_link_status = 1;
4595                 /* guard against interrupt when we're going down */
4596                 if (!test_bit(__IGB_DOWN, &adapter->state))
4597                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
4598         }
4599
4600         napi_schedule(&q_vector->napi);
4601
4602         return IRQ_HANDLED;
4603 }
4604
4605 static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4606 {
4607         struct igb_adapter *adapter = q_vector->adapter;
4608         struct e1000_hw *hw = &adapter->hw;
4609
4610         if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4611             (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4612                 if (!adapter->msix_entries)
4613                         igb_set_itr(adapter);
4614                 else
4615                         igb_update_ring_itr(q_vector);
4616         }
4617
4618         if (!test_bit(__IGB_DOWN, &adapter->state)) {
4619                 if (adapter->msix_entries)
4620                         wr32(E1000_EIMS, q_vector->eims_value);
4621                 else
4622                         igb_irq_enable(adapter);
4623         }
4624 }
4625
4626 /**
4627  * igb_poll - NAPI Rx polling callback
4628  * @napi: napi polling structure
4629  * @budget: count of how many packets we should handle
4630  **/
4631 static int igb_poll(struct napi_struct *napi, int budget)
4632 {
4633         struct igb_q_vector *q_vector = container_of(napi,
4634                                                      struct igb_q_vector,
4635                                                      napi);
4636         int tx_clean_complete = 1, work_done = 0;
4637
4638 #ifdef CONFIG_IGB_DCA
4639         if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4640                 igb_update_dca(q_vector);
4641 #endif
4642         if (q_vector->tx_ring)
4643                 tx_clean_complete = igb_clean_tx_irq(q_vector);
4644
4645         if (q_vector->rx_ring)
4646                 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4647
4648         if (!tx_clean_complete)
4649                 work_done = budget;
4650
4651         /* If not enough Rx work done, exit the polling mode */
4652         if (work_done < budget) {
4653                 napi_complete(napi);
4654                 igb_ring_irq_enable(q_vector);
4655         }
4656
4657         return work_done;
4658 }
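
/*
 * The return value follows the usual NAPI contract: reporting the full
 * budget (forced above whenever Tx cleanup is incomplete) keeps this
 * q_vector on the poll list, while work_done < budget completes NAPI
 * and re-arms the vector's interrupt through igb_ring_irq_enable().
 */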
4659
4660 /**
4661  * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4662  * @adapter: board private structure
4663  * @shhwtstamps: timestamp structure to update
4664  * @regval: unsigned 64bit system time value.
4665  *
4666  * We need to convert the system time value stored in the RX/TXSTMP registers
4667  * into a hwtstamp which can be used by the upper level timestamping functions
4668  */
4669 static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4670                                    struct skb_shared_hwtstamps *shhwtstamps,
4671                                    u64 regval)
4672 {
4673         u64 ns;
4674
4675         ns = timecounter_cyc2time(&adapter->clock, regval);
4676         timecompare_update(&adapter->compare, ns);
4677         memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4678         shhwtstamps->hwtstamp = ns_to_ktime(ns);
4679         shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4680 }
4681
4682 /**
4683  * igb_tx_hwtstamp - utility function which checks for TX time stamp
4684  * @q_vector: pointer to q_vector containing needed info
4685  * @skb: packet that was just sent
4686  *
4687  * If we were asked to do hardware stamping and such a time stamp is
4688  * available, then it must have been for this skb here because we
4689  * only allow one such packet into the queue.
4690  */
4691 static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4692 {
4693         struct igb_adapter *adapter = q_vector->adapter;
4694         union skb_shared_tx *shtx = skb_tx(skb);
4695         struct e1000_hw *hw = &adapter->hw;
4696         struct skb_shared_hwtstamps shhwtstamps;
4697         u64 regval;
4698
4699         /* if skb does not support hw timestamp or TX stamp not valid exit */
4700         if (likely(!shtx->hardware) ||
4701             !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4702                 return;
4703
4704         regval = rd32(E1000_TXSTMPL);
4705         regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4706
4707         igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4708         skb_tstamp_tx(skb, &shhwtstamps);
4709 }
4710
4711 /**
4712  * igb_clean_tx_irq - Reclaim resources after transmit completes
4713  * @q_vector: pointer to q_vector containing needed info
4714  * returns true if ring is completely cleaned
4715  **/
4716 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4717 {
4718         struct igb_adapter *adapter = q_vector->adapter;
4719         struct igb_ring *tx_ring = q_vector->tx_ring;
4720         struct net_device *netdev = tx_ring->netdev;
4721         struct e1000_hw *hw = &adapter->hw;
4722         struct igb_buffer *buffer_info;
4723         struct sk_buff *skb;
4724         union e1000_adv_tx_desc *tx_desc, *eop_desc;
4725         unsigned int total_bytes = 0, total_packets = 0;
4726         unsigned int i, eop, count = 0;
4727         bool cleaned = false;
4728
4729         i = tx_ring->next_to_clean;
4730         eop = tx_ring->buffer_info[i].next_to_watch;
4731         eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4732
4733         while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
4734                (count < tx_ring->count)) {
4735                 for (cleaned = false; !cleaned; count++) {
4736                         tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4737                         buffer_info = &tx_ring->buffer_info[i];
4738                         cleaned = (i == eop);
4739                         skb = buffer_info->skb;
4740
4741                         if (skb) {
4742                                 unsigned int segs, bytecount;
4743                                 /* gso_segs is currently only valid for tcp */
4744                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
4745                                 /* multiply data chunks by size of headers */
4746                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
4747                                             skb->len;
4748                                 total_packets += segs;
4749                                 total_bytes += bytecount;
4750
4751                                 igb_tx_hwtstamp(q_vector, skb);
4752                         }
4753
4754                         igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4755                         tx_desc->wb.status = 0;
4756
4757                         i++;
4758                         if (i == tx_ring->count)
4759                                 i = 0;
4760                 }
4761                 eop = tx_ring->buffer_info[i].next_to_watch;
4762                 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4763         }
4764
4765         tx_ring->next_to_clean = i;
4766
4767         if (unlikely(count &&
4768                      netif_carrier_ok(netdev) &&
4769                      igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
4770                 /* Make sure that anybody stopping the queue after this
4771                  * sees the new next_to_clean.
4772                  */
4773                 smp_mb();
4774                 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4775                     !(test_bit(__IGB_DOWN, &adapter->state))) {
4776                         netif_wake_subqueue(netdev, tx_ring->queue_index);
4777                         tx_ring->tx_stats.restart_queue++;
4778                 }
4779         }
4780
4781         if (tx_ring->detect_tx_hung) {
4782                 /* Detect a transmit hang in hardware; this serializes the
4783                  * check with the clearing of time_stamp and movement of i */
4784                 tx_ring->detect_tx_hung = false;
4785                 if (tx_ring->buffer_info[i].time_stamp &&
4786                     time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
4787                                (adapter->tx_timeout_factor * HZ))
4788                     && !(rd32(E1000_STATUS) &
4789                          E1000_STATUS_TXOFF)) {
4790
4791                         /* detected Tx unit hang */
4792                         dev_err(&tx_ring->pdev->dev,
4793                                 "Detected Tx Unit Hang\n"
4794                                 "  Tx Queue             <%d>\n"
4795                                 "  TDH                  <%x>\n"
4796                                 "  TDT                  <%x>\n"
4797                                 "  next_to_use          <%x>\n"
4798                                 "  next_to_clean        <%x>\n"
4799                                 "buffer_info[next_to_clean]\n"
4800                                 "  time_stamp           <%lx>\n"
4801                                 "  next_to_watch        <%x>\n"
4802                                 "  jiffies              <%lx>\n"
4803                                 "  desc.status          <%x>\n",
4804                                 tx_ring->queue_index,
4805                                 readl(tx_ring->head),
4806                                 readl(tx_ring->tail),
4807                                 tx_ring->next_to_use,
4808                                 tx_ring->next_to_clean,
4809                                 tx_ring->buffer_info[eop].time_stamp,
4810                                 eop,
4811                                 jiffies,
4812                                 eop_desc->wb.status);
4813                         netif_stop_subqueue(netdev, tx_ring->queue_index);
4814                 }
4815         }
4816         tx_ring->total_bytes += total_bytes;
4817         tx_ring->total_packets += total_packets;
4818         tx_ring->tx_stats.bytes += total_bytes;
4819         tx_ring->tx_stats.packets += total_packets;
4820         return (count < tx_ring->count);
4821 }
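
/*
 * Cleanup above walks descriptors in EOP-sized chunks: next_to_watch
 * names the EOP descriptor of each frame, and buffers are reclaimed
 * only once that descriptor reports DD (done) status.  Hang detection
 * is likewise per queue: a queue is reported hung only if the buffer
 * at next_to_clean has been pending longer than tx_timeout_factor * HZ
 * while the MAC is not paused by flow control (E1000_STATUS_TXOFF),
 * and the logged desc.status shows whether the EOP write-back ever
 * arrived.
 */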
4822
4823 /**
4824  * igb_receive_skb - helper function to handle rx indications
4825  * @q_vector: structure containing interrupt and ring information
4826  * @skb: packet to send up
4827  * @vlan_tag: vlan tag for packet
4828  **/
4829 static void igb_receive_skb(struct igb_q_vector *q_vector,
4830                             struct sk_buff *skb,
4831                             u16 vlan_tag)
4832 {
4833         struct igb_adapter *adapter = q_vector->adapter;
4834
4835         if (vlan_tag)
4836                 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4837                                  vlan_tag, skb);
4838         else
4839                 napi_gro_receive(&q_vector->napi, skb);
4840 }
4841
4842 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4843                                        u32 status_err, struct sk_buff *skb)
4844 {
4845         skb->ip_summed = CHECKSUM_NONE;
4846
4847         /* bail if the Ignore Checksum bit is set or checksum is disabled through ethtool */
4848         if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
4849              (status_err & E1000_RXD_STAT_IXSM))
4850                 return;
4851
4852         /* TCP/UDP checksum error bit is set */
4853         if (status_err &
4854             (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
4855                 /*
4856                  * work around errata with sctp packets where the TCPE aka
4857                  * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4858                  * packets (i.e. let the stack check the crc32c)
4859                  */
4860                 if (!((skb->len == 60) &&
4861                       (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
4862                         ring->rx_stats.csum_err++;
4863
4864                 /* let the stack verify checksum errors */
4865                 return;
4866         }
4867         /* It must be a TCP or UDP packet with a valid checksum */
4868         if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4869                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4870
4871         dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
4872 }
4873
4874 static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
4875                                    struct sk_buff *skb)
4876 {
4877         struct igb_adapter *adapter = q_vector->adapter;
4878         struct e1000_hw *hw = &adapter->hw;
4879         u64 regval;
4880
4881         /*
4882          * If this bit is set, then the RX registers contain the time stamp. No
4883          * other packet will be time stamped until we read these registers, so
4884          * read the registers to make them available again. Because only one
4885          * packet can be time stamped at a time, we know that the register
4886          * values must belong to this one here and therefore we don't need to
4887          * compare any of the additional attributes stored for it.
4888          *
4889          * If nothing went wrong, then it should have a skb_shared_tx that we
4890          * can turn into a skb_shared_hwtstamps.
4891          */
4892         if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
4893                 return;
4894         if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
4895                 return;
4896
4897         regval = rd32(E1000_RXSTMPL);
4898         regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4899
4900         igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
4901 }
4902 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4903                                union e1000_adv_rx_desc *rx_desc)
4904 {
4905         /* HW will not DMA in data larger than the given buffer, even if it
4906          * parses the (NFS, of course) header to be larger.  In that case, it
4907          * fills the header buffer and spills the rest into the page.
4908          */
4909         u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4910                    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4911         if (hlen > rx_ring->rx_buffer_len)
4912                 hlen = rx_ring->rx_buffer_len;
4913         return hlen;
4914 }
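
/*
 * A worked example of the header-split decode above, with assumed
 * numbers: if the masked-and-shifted hdr_info field reads 256 and
 * rx_buffer_len is 1024, hlen stays 256 and igb_clean_rx_irq_adv()
 * below pulls 256 bytes into the skb head, attaching any remainder of
 * the frame as a page fragment; a bogus length above rx_buffer_len is
 * clamped so the skb head is never overrun.
 */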
4915
4916 static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4917                                  int *work_done, int budget)
4918 {
4919         struct igb_ring *rx_ring = q_vector->rx_ring;
4920         struct net_device *netdev = rx_ring->netdev;
4921         struct pci_dev *pdev = rx_ring->pdev;
4922         union e1000_adv_rx_desc *rx_desc, *next_rxd;
4923         struct igb_buffer *buffer_info, *next_buffer;
4924         struct sk_buff *skb;
4925         bool cleaned = false;
4926         int cleaned_count = 0;
4927         unsigned int total_bytes = 0, total_packets = 0;
4928         unsigned int i;
4929         u32 staterr;
4930         u16 length;
4931         u16 vlan_tag;
4932
4933         i = rx_ring->next_to_clean;
4934         buffer_info = &rx_ring->buffer_info[i];
4935         rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4936         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4937
4938         while (staterr & E1000_RXD_STAT_DD) {
4939                 if (*work_done >= budget)
4940                         break;
4941                 (*work_done)++;
4942
4943                 skb = buffer_info->skb;
4944                 prefetch(skb->data - NET_IP_ALIGN);
4945                 buffer_info->skb = NULL;
4946
4947                 i++;
4948                 if (i == rx_ring->count)
4949                         i = 0;
4950                 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4951                 prefetch(next_rxd);
4952                 next_buffer = &rx_ring->buffer_info[i];
4953
4954                 length = le16_to_cpu(rx_desc->wb.upper.length);
4955                 cleaned = true;
4956                 cleaned_count++;
4957
4958                 if (buffer_info->dma) {
4959                         pci_unmap_single(pdev, buffer_info->dma,
4960                                          rx_ring->rx_buffer_len,
4961                                          PCI_DMA_FROMDEVICE);
4962                         buffer_info->dma = 0;
4963                         if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
4964                                 skb_put(skb, length);
4965                                 goto send_up;
4966                         }
4967                         skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4968                 }
4969
4970                 if (length) {
4971                         pci_unmap_page(pdev, buffer_info->page_dma,
4972                                        PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
4973                         buffer_info->page_dma = 0;
4974
4975                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
4976                                                 buffer_info->page,
4977                                                 buffer_info->page_offset,
4978                                                 length);
4979
4980                         if (page_count(buffer_info->page) != 1)
4981                                 buffer_info->page = NULL;
4982                         else
4983                                 get_page(buffer_info->page);
4984
4985                         skb->len += length;
4986                         skb->data_len += length;
4987
4988                         skb->truesize += length;
4989                 }
4990
4991                 if (!(staterr & E1000_RXD_STAT_EOP)) {
4992                         buffer_info->skb = next_buffer->skb;
4993                         buffer_info->dma = next_buffer->dma;
4994                         next_buffer->skb = skb;
4995                         next_buffer->dma = 0;
4996                         goto next_desc;
4997                 }
4998 send_up:
4999                 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
5000                         dev_kfree_skb_irq(skb);
5001                         goto next_desc;
5002                 }
5003
5004                 igb_rx_hwtstamp(q_vector, staterr, skb);
5005                 total_bytes += skb->len;
5006                 total_packets++;
5007
5008                 igb_rx_checksum_adv(rx_ring, staterr, skb);
5009
5010                 skb->protocol = eth_type_trans(skb, netdev);
5011                 skb_record_rx_queue(skb, rx_ring->queue_index);
5012
5013                 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5014                             le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5015
5016                 igb_receive_skb(q_vector, skb, vlan_tag);
5017
5018 next_desc:
5019                 rx_desc->wb.upper.status_error = 0;
5020
5021                 /* return some buffers to hardware, one at a time is too slow */
5022                 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
5023                         igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5024                         cleaned_count = 0;
5025                 }
5026
5027                 /* use prefetched values */
5028                 rx_desc = next_rxd;
5029                 buffer_info = next_buffer;
5030                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5031         }
5032
5033         rx_ring->next_to_clean = i;
5034         cleaned_count = igb_desc_unused(rx_ring);
5035
5036         if (cleaned_count)
5037                 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5038
5039         rx_ring->total_packets += total_packets;
5040         rx_ring->total_bytes += total_bytes;
5041         rx_ring->rx_stats.packets += total_packets;
5042         rx_ring->rx_stats.bytes += total_bytes;
5043         return cleaned;
5044 }
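
/*
 * For frames spanning multiple descriptors (no EOP yet), the partial
 * skb is handed forward above by swapping it into next_buffer, so the
 * next iteration keeps appending page fragments to the same skb until
 * the descriptor with E1000_RXD_STAT_EOP finally sends it up the
 * stack.
 */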
5045
5046 /**
5047  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
5048  * @rx_ring: address of the ring structure whose buffers are replaced
5049  **/
5050 void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5051 {
5052         struct net_device *netdev = rx_ring->netdev;
5053         union e1000_adv_rx_desc *rx_desc;
5054         struct igb_buffer *buffer_info;
5055         struct sk_buff *skb;
5056         unsigned int i;
5057         int bufsz;
5058
5059         i = rx_ring->next_to_use;
5060         buffer_info = &rx_ring->buffer_info[i];
5061
5062         bufsz = rx_ring->rx_buffer_len;
5063
5064         while (cleaned_count--) {
5065                 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5066
5067                 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5068                         if (!buffer_info->page) {
5069                                 buffer_info->page = alloc_page(GFP_ATOMIC);
5070                                 if (!buffer_info->page) {
5071                                         rx_ring->rx_stats.alloc_failed++;
5072                                         goto no_buffers;
5073                                 }
5074                                 buffer_info->page_offset = 0;
5075                         } else {
5076                                 buffer_info->page_offset ^= PAGE_SIZE / 2;
5077                         }
5078                         buffer_info->page_dma =
5079                                 pci_map_page(rx_ring->pdev, buffer_info->page,
5080                                              buffer_info->page_offset,
5081                                              PAGE_SIZE / 2,
5082                                              PCI_DMA_FROMDEVICE);
5083                 }
5084
5085                 if (!buffer_info->skb) {
5086                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
5087                         if (!skb) {
5088                                 rx_ring->rx_stats.alloc_failed++;
5089                                 goto no_buffers;
5090                         }
5091
5092                         buffer_info->skb = skb;
5093                         buffer_info->dma = pci_map_single(rx_ring->pdev,
5094                                                           skb->data,
5095                                                           bufsz,
5096                                                           PCI_DMA_FROMDEVICE);
5097                 }
5098                 /* Refresh the desc even if buffer_addrs didn't change because
5099                  * each write-back erases this info. */
5100                 if (bufsz < IGB_RXBUFFER_1024) {
5101                         rx_desc->read.pkt_addr =
5102                              cpu_to_le64(buffer_info->page_dma);
5103                         rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5104                 } else {
5105                         rx_desc->read.pkt_addr =
5106                              cpu_to_le64(buffer_info->dma);
5107                         rx_desc->read.hdr_addr = 0;
5108                 }
5109
5110                 i++;
5111                 if (i == rx_ring->count)
5112                         i = 0;
5113                 buffer_info = &rx_ring->buffer_info[i];
5114         }
5115
5116 no_buffers:
5117         if (rx_ring->next_to_use != i) {
5118                 rx_ring->next_to_use = i;
5119                 if (i == 0)
5120                         i = (rx_ring->count - 1);
5121                 else
5122                         i--;
5123
5124                 /* Force memory writes to complete before letting h/w
5125                  * know there are new descriptors to fetch.  (Only
5126                  * applicable for weak-ordered memory model archs,
5127                  * such as IA-64). */
5128                 wmb();
5129                 writel(i, rx_ring->tail);
5130         }
5131 }
5132
5133 /**
5134  * igb_mii_ioctl - handle MII ioctls on copper PHY devices
5135  * @netdev: network interface device structure
5136  * @ifr: pointer to the ioctl interface request
5137  * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
5138  **/
5139 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5140 {
5141         struct igb_adapter *adapter = netdev_priv(netdev);
5142         struct mii_ioctl_data *data = if_mii(ifr);
5143
5144         if (adapter->hw.phy.media_type != e1000_media_type_copper)
5145                 return -EOPNOTSUPP;
5146
5147         switch (cmd) {
5148         case SIOCGMIIPHY:
5149                 data->phy_id = adapter->hw.phy.addr;
5150                 break;
5151         case SIOCGMIIREG:
5152                 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
5153                                      &data->val_out))
5154                         return -EIO;
5155                 break;
5156         case SIOCSMIIREG:
5157         default:
5158                 return -EOPNOTSUPP;
5159         }
5160         return 0;
5161 }
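
/*
 * Userspace reaches this handler through the generic MII ioctls; a
 * minimal sketch of reading a PHY register (the device name "eth0" and
 * the helper name are assumptions, not part of this driver):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int read_phy_reg(int reg, unsigned int *val)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *)&ifr.ifr_data;
 *		int err, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		err = ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *		if (!err) {
 *			mii->reg_num = reg & 0x1f;
 *			err = ioctl(fd, SIOCGMIIREG, &ifr);
 *		}
 *		if (!err)
 *			*val = mii->val_out;
 *		close(fd);
 *		return err;
 *	}
 */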
5162
5163 /**
5164  * igb_hwtstamp_ioctl - control hardware time stamping
5165  * @netdev: network interface device structure
5166  * @ifr: pointer to the ioctl interface request
5167  * @cmd: ioctl command (SIOCSHWTSTAMP)
5168  *
5169  * Outgoing time stamping can be enabled and disabled. Play nice and
5170  * disable it when requested, although it shouldn't cause any overhead
5171  * when no packet needs it. At most one packet in the queue may be
5172  * marked for time stamping, otherwise it would be impossible to tell
5173  * for sure to which packet the hardware time stamp belongs.
5174  *
5175  * Incoming time stamping has to be configured via the hardware
5176  * filters. Not all combinations are supported, in particular event
5177  * type has to be specified. Matching the kind of event packet is
5178  * not supported, with the exception of "all V2 events regardless of
5179  * layer 2 or 4".
5180  *
5181  **/
5182 static int igb_hwtstamp_ioctl(struct net_device *netdev,
5183                               struct ifreq *ifr, int cmd)
5184 {
5185         struct igb_adapter *adapter = netdev_priv(netdev);
5186         struct e1000_hw *hw = &adapter->hw;
5187         struct hwtstamp_config config;
5188         u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
5189         u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
5190         u32 tsync_rx_cfg = 0;
5191         bool is_l4 = false;
5192         bool is_l2 = false;
5193         u32 regval;
5194
5195         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5196                 return -EFAULT;
5197
5198         /* reserved for future extensions */
5199         if (config.flags)
5200                 return -EINVAL;
5201
5202         switch (config.tx_type) {
5203         case HWTSTAMP_TX_OFF:
5204                 tsync_tx_ctl = 0; /* fall through */
5205         case HWTSTAMP_TX_ON:
5206                 break;
5207         default:
5208                 return -ERANGE;
5209         }
5210
5211         switch (config.rx_filter) {
5212         case HWTSTAMP_FILTER_NONE:
5213                 tsync_rx_ctl = 0;
5214                 break;
5215         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5216         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
5217         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
5218         case HWTSTAMP_FILTER_ALL:
5219                 /*
5220                  * register TSYNCRXCFG must be set, therefore it is not
5221                  * possible to time stamp both Sync and Delay_Req messages
5222                  * => fall back to time stamping all packets
5223                  */
5224                 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
5225                 config.rx_filter = HWTSTAMP_FILTER_ALL;
5226                 break;
5227         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
5228                 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
5229                 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
5230                 is_l4 = true;
5231                 break;
5232         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
5233                 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
5234                 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
5235                 is_l4 = true;
5236                 break;
5237         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5238         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5239                 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5240                 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5241                 is_l2 = true;
5242                 is_l4 = true;
5243                 config.rx_filter = HWTSTAMP_FILTER_SOME;
5244                 break;
5245         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5246         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5247                 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5248                 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5249                 is_l2 = true;
5250                 is_l4 = true;
5251                 config.rx_filter = HWTSTAMP_FILTER_SOME;
5252                 break;
5253         case HWTSTAMP_FILTER_PTP_V2_EVENT:
5254         case HWTSTAMP_FILTER_PTP_V2_SYNC:
5255         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5256                 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5257                 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5258                 is_l2 = true;
5259                 break;
5260         default:
5261                 return -ERANGE;
5262         }
5263
5264         if (hw->mac.type == e1000_82575) {
5265                 if (tsync_rx_ctl | tsync_tx_ctl)
5266                         return -EINVAL;
5267                 return 0;
5268         }
5269
5270         /* enable/disable TX */
5271         regval = rd32(E1000_TSYNCTXCTL);
5272         regval &= ~E1000_TSYNCTXCTL_ENABLED;
5273         regval |= tsync_tx_ctl;
5274         wr32(E1000_TSYNCTXCTL, regval);
5275
5276         /* enable/disable RX */
5277         regval = rd32(E1000_TSYNCRXCTL);
5278         regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5279         regval |= tsync_rx_ctl;
5280         wr32(E1000_TSYNCRXCTL, regval);
5281
5282         /* define which PTP packets are time stamped */
5283         wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5284
5285         /* define ethertype filter for timestamped packets */
5286         if (is_l2)
5287                 wr32(E1000_ETQF(3),
5288                                 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5289                                  E1000_ETQF_1588 | /* enable timestamping */
5290                                  ETH_P_1588));     /* 1588 eth protocol type */
5291         else
5292                 wr32(E1000_ETQF(3), 0);
5293
5294 #define PTP_PORT 319
5295         /* L4 Queue Filter[3]: filter by destination port and protocol */
5296         if (is_l4) {
5297                 u32 ftqf = (IPPROTO_UDP /* UDP */
5298                         | E1000_FTQF_VF_BP /* VF not compared */
5299                         | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5300                         | E1000_FTQF_MASK); /* mask all inputs */
5301                 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5302
5303                 wr32(E1000_IMIR(3), htons(PTP_PORT));
5304                 wr32(E1000_IMIREXT(3),
5305                      (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5306                 if (hw->mac.type == e1000_82576) {
5307                         /* enable source port check */
5308                         wr32(E1000_SPQF(3), htons(PTP_PORT));
5309                         ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5310                 }
5311                 wr32(E1000_FTQF(3), ftqf);
5312         } else {
5313                 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5314         }
5315         wrfl();
5316
5317         adapter->hwtstamp_config = config;
5318
5319         /* clear TX/RX time stamp registers, just to be sure */
5320         regval = rd32(E1000_TXSTMPH);
5321         regval = rd32(E1000_RXSTMPH);
5322
5323         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
5324                 -EFAULT : 0;
5325 }
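
/*
 * A minimal userspace sketch of driving this handler (the socket setup
 * and the device name "eth0" are assumptions, not part of this
 * driver):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int enable_hw_timestamps(int fd)	// fd: any AF_INET socket
 *	{
 *		struct hwtstamp_config cfg;
 *		struct ifreq ifr;
 *
 *		memset(&cfg, 0, sizeof(cfg));
 *		cfg.tx_type = HWTSTAMP_TX_ON;
 *		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *
 *		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *			return -1;
 *		// on return cfg.rx_filter holds what was actually
 *		// programmed, e.g. HWTSTAMP_FILTER_ALL after the
 *		// fallback above
 *		return 0;
 *	}
 */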
5326
5327 /**
5328  * igb_ioctl - dispatch device-specific ioctl requests
5329  * @netdev: network interface device structure
5330  * @ifr: pointer to the ioctl interface request
5331  * @cmd: ioctl command
5332  **/
5333 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5334 {
5335         switch (cmd) {
5336         case SIOCGMIIPHY:
5337         case SIOCGMIIREG:
5338         case SIOCSMIIREG:
5339                 return igb_mii_ioctl(netdev, ifr, cmd);
5340         case SIOCSHWTSTAMP:
5341                 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
5342         default:
5343                 return -EOPNOTSUPP;
5344         }
5345 }
5346
5347 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5348 {
5349         struct igb_adapter *adapter = hw->back;
5350         u16 cap_offset;
5351
5352         cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5353         if (!cap_offset)
5354                 return -E1000_ERR_CONFIG;
5355
5356         pci_read_config_word(adapter->pdev, cap_offset + reg, value);
5357
5358         return 0;
5359 }
5360
5361 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5362 {
5363         struct igb_adapter *adapter = hw->back;
5364         u16 cap_offset;
5365
5366         cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5367         if (!cap_offset)
5368                 return -E1000_ERR_CONFIG;
5369
5370         pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
5371
5372         return 0;
5373 }
5374
5375 static void igb_vlan_rx_register(struct net_device *netdev,
5376                                  struct vlan_group *grp)
5377 {
5378         struct igb_adapter *adapter = netdev_priv(netdev);
5379         struct e1000_hw *hw = &adapter->hw;
5380         u32 ctrl, rctl;
5381
5382         igb_irq_disable(adapter);
5383         adapter->vlgrp = grp;
5384
5385         if (grp) {
5386                 /* enable VLAN tag insert/strip */
5387                 ctrl = rd32(E1000_CTRL);
5388                 ctrl |= E1000_CTRL_VME;
5389                 wr32(E1000_CTRL, ctrl);
5390
5391                 /* Disable CFI check */
5392                 rctl = rd32(E1000_RCTL);
5393                 rctl &= ~E1000_RCTL_CFIEN;
5394                 wr32(E1000_RCTL, rctl);
5395         } else {
5396                 /* disable VLAN tag insert/strip */
5397                 ctrl = rd32(E1000_CTRL);
5398                 ctrl &= ~E1000_CTRL_VME;
5399                 wr32(E1000_CTRL, ctrl);
5400         }
5401
5402         igb_rlpml_set(adapter);
5403
5404         if (!test_bit(__IGB_DOWN, &adapter->state))
5405                 igb_irq_enable(adapter);
5406 }
5407
5408 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5409 {
5410         struct igb_adapter *adapter = netdev_priv(netdev);
5411         struct e1000_hw *hw = &adapter->hw;
5412         int pf_id = adapter->vfs_allocated_count;
5413
5414         /* attempt to add filter to vlvf array */
5415         igb_vlvf_set(adapter, vid, true, pf_id);
5416
5417         /* add the filter since PF can receive vlans w/o entry in vlvf */
5418         igb_vfta_set(hw, vid, true);
5419 }
5420
5421 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5422 {
5423         struct igb_adapter *adapter = netdev_priv(netdev);
5424         struct e1000_hw *hw = &adapter->hw;
5425         int pf_id = adapter->vfs_allocated_count;
5426         s32 err;
5427
5428         igb_irq_disable(adapter);
5429         vlan_group_set_device(adapter->vlgrp, vid, NULL);
5430
5431         if (!test_bit(__IGB_DOWN, &adapter->state))
5432                 igb_irq_enable(adapter);
5433
5434         /* remove vlan from VLVF table array */
5435         err = igb_vlvf_set(adapter, vid, false, pf_id);
5436
5437         /* if vid was not present in VLVF just remove it from table */
5438         if (err)
5439                 igb_vfta_set(hw, vid, false);
5440 }
5441
5442 static void igb_restore_vlan(struct igb_adapter *adapter)
5443 {
5444         igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5445
5446         if (adapter->vlgrp) {
5447                 u16 vid;
5448                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5449                         if (!vlan_group_get_device(adapter->vlgrp, vid))
5450                                 continue;
5451                         igb_vlan_rx_add_vid(adapter->netdev, vid);
5452                 }
5453         }
5454 }
5455
5456 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5457 {
5458         struct e1000_mac_info *mac = &adapter->hw.mac;
5459
5460         mac->autoneg = 0;
5461
5462         switch (spddplx) {
5463         case SPEED_10 + DUPLEX_HALF:
5464                 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5465                 break;
5466         case SPEED_10 + DUPLEX_FULL:
5467                 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5468                 break;
5469         case SPEED_100 + DUPLEX_HALF:
5470                 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5471                 break;
5472         case SPEED_100 + DUPLEX_FULL:
5473                 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5474                 break;
5475         case SPEED_1000 + DUPLEX_FULL:
5476                 mac->autoneg = 1;
5477                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5478                 break;
5479         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5480         default:
5481                 dev_err(&adapter->pdev->dev,
5482                         "Unsupported Speed/Duplex configuration\n");
5483                 return -EINVAL;
5484         }
5485         return 0;
5486 }
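
/*
 * The spddplx encoding above is simply the ethtool speed value plus
 * the duplex constant (DUPLEX_HALF is 0, DUPLEX_FULL is 1), so for
 * example SPEED_100 + DUPLEX_FULL selects case 101.  Note that 1000
 * full is implemented by advertising only that mode through
 * autonegotiation rather than by forcing the speed.
 */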
5487
5488 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5489 {
5490         struct net_device *netdev = pci_get_drvdata(pdev);
5491         struct igb_adapter *adapter = netdev_priv(netdev);
5492         struct e1000_hw *hw = &adapter->hw;
5493         u32 ctrl, rctl, status;
5494         u32 wufc = adapter->wol;
5495 #ifdef CONFIG_PM
5496         int retval = 0;
5497 #endif
5498
5499         netif_device_detach(netdev);
5500
5501         if (netif_running(netdev))
5502                 igb_close(netdev);
5503
5504         igb_clear_interrupt_scheme(adapter);
5505
5506 #ifdef CONFIG_PM
5507         retval = pci_save_state(pdev);
5508         if (retval)
5509                 return retval;
5510 #endif
5511
5512         status = rd32(E1000_STATUS);
5513         if (status & E1000_STATUS_LU)
5514                 wufc &= ~E1000_WUFC_LNKC;
5515
5516         if (wufc) {
5517                 igb_setup_rctl(adapter);
5518                 igb_set_rx_mode(netdev);
5519
5520                 /* turn on all-multi mode if wake on multicast is enabled */
5521                 if (wufc & E1000_WUFC_MC) {
5522                         rctl = rd32(E1000_RCTL);
5523                         rctl |= E1000_RCTL_MPE;
5524                         wr32(E1000_RCTL, rctl);
5525                 }
5526
5527                 ctrl = rd32(E1000_CTRL);
5528                 /* advertise wake from D3Cold */
5529                 #define E1000_CTRL_ADVD3WUC 0x00100000
5530                 /* phy power management enable */
5531                 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5532                 ctrl |= E1000_CTRL_ADVD3WUC;
5533                 wr32(E1000_CTRL, ctrl);
5534
5535                 /* Allow time for pending master requests to run */
5536                 igb_disable_pcie_master(&adapter->hw);
5537
5538                 wr32(E1000_WUC, E1000_WUC_PME_EN);
5539                 wr32(E1000_WUFC, wufc);
5540         } else {
5541                 wr32(E1000_WUC, 0);
5542                 wr32(E1000_WUFC, 0);
5543         }
5544
5545         *enable_wake = wufc || adapter->en_mng_pt;
5546         if (!*enable_wake)
5547                 igb_shutdown_serdes_link_82575(hw);
5548
5549         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
5550          * would have already happened in close and is redundant. */
5551         igb_release_hw_control(adapter);
5552
5553         pci_disable_device(pdev);
5554
5555         return 0;
5556 }
5557
5558 #ifdef CONFIG_PM
5559 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
5560 {
5561         int retval;
5562         bool wake;
5563
5564         retval = __igb_shutdown(pdev, &wake);
5565         if (retval)
5566                 return retval;
5567
5568         if (wake) {
5569                 pci_prepare_to_sleep(pdev);
5570         } else {
5571                 pci_wake_from_d3(pdev, false);
5572                 pci_set_power_state(pdev, PCI_D3hot);
5573         }
5574
5575         return 0;
5576 }
5577
5578 static int igb_resume(struct pci_dev *pdev)
5579 {
5580         struct net_device *netdev = pci_get_drvdata(pdev);
5581         struct igb_adapter *adapter = netdev_priv(netdev);
5582         struct e1000_hw *hw = &adapter->hw;
5583         int err;
5584
5585         pci_set_power_state(pdev, PCI_D0);
5586         pci_restore_state(pdev);
5587
5588         err = pci_enable_device_mem(pdev);
5589         if (err) {
5590                 dev_err(&pdev->dev,
5591                         "igb: Cannot enable PCI device from suspend\n");
5592                 return err;
5593         }
5594         pci_set_master(pdev);
5595
5596         pci_enable_wake(pdev, PCI_D3hot, 0);
5597         pci_enable_wake(pdev, PCI_D3cold, 0);
5598
5599         if (igb_init_interrupt_scheme(adapter)) {
5600                 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5601                 return -ENOMEM;
5602         }
5603
5604         /* e1000_power_up_phy(adapter); */
5605
5606         igb_reset(adapter);
5607
5608         /* let the f/w know that the h/w is now under the control of the
5609          * driver. */
5610         igb_get_hw_control(adapter);
5611
5612         wr32(E1000_WUS, ~0);
5613
5614         if (netif_running(netdev)) {
5615                 err = igb_open(netdev);
5616                 if (err)
5617                         return err;
5618         }
5619
5620         netif_device_attach(netdev);
5621
5622         return 0;
5623 }
5624 #endif
5625
5626 static void igb_shutdown(struct pci_dev *pdev)
5627 {
5628         bool wake;
5629
5630         __igb_shutdown(pdev, &wake);
5631
5632         if (system_state == SYSTEM_POWER_OFF) {
5633                 pci_wake_from_d3(pdev, wake);
5634                 pci_set_power_state(pdev, PCI_D3hot);
5635         }
5636 }
5637
5638 #ifdef CONFIG_NET_POLL_CONTROLLER
5639 /*
5640  * Polling 'interrupt' - used by things like netconsole to send skbs
5641  * without having to re-enable interrupts. It's not called while
5642  * the interrupt routine is executing.
5643  */
5644 static void igb_netpoll(struct net_device *netdev)
5645 {
5646         struct igb_adapter *adapter = netdev_priv(netdev);
5647         struct e1000_hw *hw = &adapter->hw;
5648         int i;
5649
5650         if (!adapter->msix_entries) {
5651                 struct igb_q_vector *q_vector = adapter->q_vector[0];
5652                 igb_irq_disable(adapter);
5653                 napi_schedule(&q_vector->napi);
5654                 return;
5655         }
5656
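        /* MSI-X: mask each queue vector's interrupt and schedule its
         * NAPI poll */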
5657         for (i = 0; i < adapter->num_q_vectors; i++) {
5658                 struct igb_q_vector *q_vector = adapter->q_vector[i];
5659                 wr32(E1000_EIMC, q_vector->eims_value);
5660                 napi_schedule(&q_vector->napi);
5661         }
5662 }
5663 #endif /* CONFIG_NET_POLL_CONTROLLER */
5664
5665 /**
5666  * igb_io_error_detected - called when PCI error is detected
5667  * @pdev: Pointer to PCI device
5668  * @state: The current pci connection state
5669  *
5670  * This function is called after a PCI bus error affecting
5671  * this device has been detected.
5672  */
5673 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5674                                               pci_channel_state_t state)
5675 {
5676         struct net_device *netdev = pci_get_drvdata(pdev);
5677         struct igb_adapter *adapter = netdev_priv(netdev);
5678
5679         netif_device_detach(netdev);
5680
5681         if (state == pci_channel_io_perm_failure)
5682                 return PCI_ERS_RESULT_DISCONNECT;
5683
5684         if (netif_running(netdev))
5685                 igb_down(adapter);
5686         pci_disable_device(pdev);
5687
5688         /* Request a slot reset. */
5689         return PCI_ERS_RESULT_NEED_RESET;
5690 }
5691
5692 /**
5693  * igb_io_slot_reset - called after the pci bus has been reset.
5694  * @pdev: Pointer to PCI device
5695  *
5696  * Restart the card from scratch, as if from a cold boot. Implementation
5697  * resembles the first half of the igb_resume routine.
5698  */
5699 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
5700 {
5701         struct net_device *netdev = pci_get_drvdata(pdev);
5702         struct igb_adapter *adapter = netdev_priv(netdev);
5703         struct e1000_hw *hw = &adapter->hw;
5704         pci_ers_result_t result;
5705         int err;
5706
5707         if (pci_enable_device_mem(pdev)) {
5708                 dev_err(&pdev->dev,
5709                         "Cannot re-enable PCI device after reset.\n");
5710                 result = PCI_ERS_RESULT_DISCONNECT;
5711         } else {
5712                 pci_set_master(pdev);
5713                 pci_restore_state(pdev);
5714
5715                 pci_enable_wake(pdev, PCI_D3hot, 0);
5716                 pci_enable_wake(pdev, PCI_D3cold, 0);
5717
5718                 igb_reset(adapter);
5719                 wr32(E1000_WUS, ~0);
5720                 result = PCI_ERS_RESULT_RECOVERED;
5721         }
5722
5723         err = pci_cleanup_aer_uncorrect_error_status(pdev);
5724         if (err) {
5725                 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
5726                         "failed 0x%0x\n", err);
5727                 /* non-fatal, continue */
5728         }
5729
5730         return result;
5731 }
5732
5733 /**
5734  * igb_io_resume - called when traffic can start flowing again.
5735  * @pdev: Pointer to PCI device
5736  *
5737  * This callback is called when the error recovery driver tells us that
5738  * it's OK to resume normal operation. Implementation resembles the
5739  * second half of the igb_resume routine.
5740  */
5741 static void igb_io_resume(struct pci_dev *pdev)
5742 {
5743         struct net_device *netdev = pci_get_drvdata(pdev);
5744         struct igb_adapter *adapter = netdev_priv(netdev);
5745
5746         if (netif_running(netdev)) {
5747                 if (igb_up(adapter)) {
5748                         dev_err(&pdev->dev, "igb_up failed after reset\n");
5749                         return;
5750                 }
5751         }
5752
5753         netif_device_attach(netdev);
5754
5755         /* let the f/w know that the h/w is now under the control of the
5756          * driver. */
5757         igb_get_hw_control(adapter);
5758 }
5759
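/**
 * igb_rar_set_qsel - write a MAC address into a receive address register
 * @adapter: Board private structure
 * @addr: MAC address in network (big endian) byte order
 * @index: Receive address register (RAL/RAH pair) to program
 * @qsel: Pool/queue to associate with the address
 */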
5760 static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
5761                              u8 qsel)
5762 {
5763         u32 rar_low, rar_high;
5764         struct e1000_hw *hw = &adapter->hw;
5765
5766         /* HW expects these in little endian so we reverse the byte order
5767          * from network order (big endian) to little endian
5768          */
5769         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5770                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5771         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5772
5773         /* Indicate to hardware the Address is Valid. */
5774         rar_high |= E1000_RAH_AV;
5775
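        /* the RAH pool-select encoding differs by MAC: the multiply treats
         * it as a pool number field on 82575, while the shift treats it as
         * a per-pool bitmask on later parts such as 82576 */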
5776         if (hw->mac.type == e1000_82575)
5777                 rar_high |= E1000_RAH_POOL_1 * qsel;
5778         else
5779                 rar_high |= E1000_RAH_POOL_1 << qsel;
5780
5781         wr32(E1000_RAL(index), rar_low);
5782         wrfl();
5783         wr32(E1000_RAH(index), rar_high);
5784         wrfl();
5785 }
5786
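/**
 * igb_set_vf_mac - program the MAC address used by a virtual function
 * @adapter: Board private structure
 * @vf: Number of the VF the address belongs to
 * @mac_addr: Address to assign
 *
 * Records the address for the VF and writes it to a receive address
 * register tagged with the VF's pool so its traffic is steered to it.
 */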
5787 static int igb_set_vf_mac(struct igb_adapter *adapter,
5788                           int vf, unsigned char *mac_addr)
5789 {
5790         struct e1000_hw *hw = &adapter->hw;
5791         /* VF MAC addresses start at the end of the receive addresses and
5792          * move towards the first, so a collision should not be possible */
5793         int rar_entry = hw->mac.rar_entry_count - (vf + 1);
5794
5795         memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5796
5797         igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
5798
5799         return 0;
5800 }
5801
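/**
 * igb_vmm_control - configure VMDq-related hardware behavior
 * @adapter: Board private structure
 *
 * Enables VLAN tag stripping on replicated packets, tells the hardware
 * that the MAC is inserting VLAN tags, and turns VMDq loopback and
 * replication on only while SR-IOV VFs are allocated.
 */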
5802 static void igb_vmm_control(struct igb_adapter *adapter)
5803 {
5804         struct e1000_hw *hw = &adapter->hw;
5805         u32 reg;
5806
5807         /* replication is not supported for 82575 */
5808         if (hw->mac.type == e1000_82575)
5809                 return;
5810
5811         /* enable replication vlan tag stripping */
5812         reg = rd32(E1000_RPLOLR);
5813         reg |= E1000_RPLOLR_STRVLAN;
5814         wr32(E1000_RPLOLR, reg);
5815
5816         /* notify HW that the MAC is adding vlan tags */
5817         reg = rd32(E1000_DTXCTL);
5818         reg |= E1000_DTXCTL_VLAN_ADDED;
5819         wr32(E1000_DTXCTL, reg);
5820
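        /* loopback and replication between pools only apply while VFs
         * are allocated; keep both disabled otherwise */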
5821         if (adapter->vfs_allocated_count) {
5822                 igb_vmdq_set_loopback_pf(hw, true);
5823                 igb_vmdq_set_replication_pf(hw, true);
5824         } else {
5825                 igb_vmdq_set_loopback_pf(hw, false);
5826                 igb_vmdq_set_replication_pf(hw, false);
5827         }
5828 }
5829
5830 /* igb_main.c */