/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_tctl(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
					   struct net_device *,
					   struct igb_ring *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
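
/*
 * Worked example (illustrative, not from the datasheet): with
 * IGB_TSYNC_SHIFT = 19 and a 16 ns cycle time, the base increment is
 * 16 * (1 << 19) = 8388608 = 0x800000, which fits in TIMINCA's 24-bit
 * increment field (max 0xFFFFFF). A +100 ppm correction would add
 * roughly 8388608 * 100 / 1e9 ~= 839 to that value.
 */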
/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 **/
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char *buffer)
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		(unsigned long long)hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif
/**
 * igb_desc_unused - calculate if we have unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
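
/*
 * Example (illustrative): with ring->count = 256, next_to_use = 250 and
 * next_to_clean = 10, the count wraps: 256 + 10 - 250 - 1 = 15 unused
 * descriptors. One slot is always left unused so that a completely full
 * ring can still be told apart from an empty one
 * (next_to_use != next_to_clean).
 */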
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
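/*
 * Illustrative mapping produced by Q_IDX_82576: i = 0, 1, 2, 3, 4 maps
 * to queue 0, 8, 1, 9, 2, ... so even indices fill queues 0-7 and odd
 * indices fill queues 8-15. This interleaves with the per-VF queue
 * pairing described in igb_cache_ring_register() below.
 */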
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}
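
/*
 * IVAR worked example (an illustration of the layout described above):
 * mapping rx_queue 9 to MSI-X vector 3 selects entry (9 & 0x7) = 1 and
 * writes (3 | E1000_IVAR_VALID) into bits 23:16 of IVAR0[1], leaving
 * the other three byte lanes of that entry untouched.
 */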
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}
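
/*
 * Example (illustrative): on an 82576 with 4 q_vectors, vector 0 is
 * claimed for "other" causes (eims_other = 0x01) and vectors 1-4 serve
 * the queues, so eims_enable_mask ends up as 0x1f.
 */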
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  &igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
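
/*
 * Example (illustrative): with 4 rx queues, 2 tx queues and 4 q_vectors,
 * the shared path above yields Tx0/Rx0 -> vector 0, Tx1/Rx1 -> vector 1,
 * Rx2 -> vector 2 and Rx3 -> vector 3. With 6 or more q_vectors every
 * ring would get a dedicated vector instead.
 */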
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);

		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}
	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));
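	/* Worked example (illustrative): with pba = 34 KB and a 1518-byte
	 * max frame, hwm = min(34816 * 9 / 10, 34816 - 2 * 1518)
	 *               = min(31334, 31780) = 31334; on 82575 the mark is
	 * then rounded down to 8-byte granularity, 31334 & 0xFFF8 = 31328.
	 */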
	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);
	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;
#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		/* 82576 supports a maximum of 7 VFs in addition to the PF */
		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
		int i;
		unsigned char mac_addr[ETH_ALEN];

		if (num_vfs) {
			adapter->vf_data = kcalloc(num_vfs,
					sizeof(struct vf_data_storage),
					GFP_KERNEL);
			if (!adapter->vf_data) {
				dev_err(&pdev->dev,
				        "Could not allocate VF private data - "
				        "IOV enable failed\n");
			} else {
				err = pci_enable_sriov(pdev, num_vfs);
				if (!err) {
					adapter->vfs_allocated_count = num_vfs;
					dev_info(&pdev->dev,
					         "%d vfs allocated\n",
					         num_vfs);
					for (i = 0;
					     i < adapter->vfs_allocated_count;
					     i++) {
						random_ether_addr(mac_addr);
						igb_set_vf_mac(adapter, i,
						               mac_addr);
					}
				} else {
					kfree(adapter->vf_data);
					adapter->vf_data = NULL;
				}
			}
		}
	}
#endif
	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	case e1000_82576:
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->hw.mac.type == e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;
	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}
	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);
	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}
#endif
	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1 << 24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif

	timecounter_init(&adapter->clock,
	                 &adapter->cycles,
	                 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled_work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	igb_vmm_control(adapter);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	     !(adapter->vlgrp &&
	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
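
/*
 * Example (an assumption for illustration, the actual constant lives in
 * igb.h): if IGB_MAX_TX_QUEUES were 16 and 4 tx queues were allocated,
 * multi_tx_table[] would map stack queue i to tx_ring[i % 4], so
 * entries 0, 4, 8 and 12 all point at tx_ring[0].
 */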
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
static void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
2100 * igb_configure_tx_ring - Configure transmit ring after Reset
2101 * @adapter: board private structure
2102 * @ring: tx ring to configure
2104 * Configure a transmit ring after a reset.
2106 static void igb_configure_tx_ring(struct igb_adapter *adapter,
2107 struct igb_ring *ring)
2109 struct e1000_hw *hw = &adapter->hw;
2111 u64 tdba = ring->dma;
2112 int reg_idx = ring->reg_idx;
2114 /* disable the queue */
2115 txdctl = rd32(E1000_TXDCTL(reg_idx));
2116 wr32(E1000_TXDCTL(reg_idx),
2117 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2121 wr32(E1000_TDLEN(reg_idx),
2122 ring->count * sizeof(union e1000_adv_tx_desc));
2123 wr32(E1000_TDBAL(reg_idx),
2124 tdba & 0x00000000ffffffffULL);
2125 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
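/* Editor's illustrative sketch (not upstream code): splitting a 64-bit DMA
 * address across the BAL/BAH register pair. For example, with
 * tdba == 0x0000000123456000ULL:
 *
 *	tdba & 0x00000000ffffffffULL	-> 0x23456000 (written to TDBAL)
 *	tdba >> 32			-> 0x00000001 (written to TDBAH)
 */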
2127 ring->head = E1000_TDH(reg_idx);
2128 ring->tail = E1000_TDT(reg_idx);
2129 writel(0, hw->hw_addr + ring->tail);
2130 writel(0, hw->hw_addr + ring->head);
2132 txdctl |= IGB_TX_PTHRESH;
2133 txdctl |= IGB_TX_HTHRESH << 8;
2134 txdctl |= IGB_TX_WTHRESH << 16;
2136 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2137 wr32(E1000_TXDCTL(reg_idx), txdctl);
2141 * igb_configure_tx - Configure transmit Unit after Reset
2142 * @adapter: board private structure
2144 * Configure the Tx unit of the MAC after a reset.
2146 static void igb_configure_tx(struct igb_adapter *adapter)
2150 for (i = 0; i < adapter->num_tx_queues; i++)
2151 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2153 /* Setup Transmit Descriptor Settings for eop descriptor */
2154 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
2158 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2159 * @adapter: board private structure
2160 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2162 * Returns 0 on success, negative on failure
2164 int igb_setup_rx_resources(struct igb_adapter *adapter,
2165 struct igb_ring *rx_ring)
2167 struct pci_dev *pdev = adapter->pdev;
2170 size = sizeof(struct igb_buffer) * rx_ring->count;
2171 rx_ring->buffer_info = vmalloc(size);
2172 if (!rx_ring->buffer_info)
2173 goto err;
2174 memset(rx_ring->buffer_info, 0, size);
2176 desc_len = sizeof(union e1000_adv_rx_desc);
2178 /* Round up to nearest 4K */
2179 rx_ring->size = rx_ring->count * desc_len;
2180 rx_ring->size = ALIGN(rx_ring->size, 4096);
2182 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2183 &rx_ring->dma);
2188 rx_ring->next_to_clean = 0;
2189 rx_ring->next_to_use = 0;
2194 vfree(rx_ring->buffer_info);
2195 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
2196 "the receive descriptor ring\n");
2201 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2202 * (Descriptors) for all queues
2203 * @adapter: board private structure
2205 * Return 0 on success, negative on failure
2207 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2211 for (i = 0; i < adapter->num_rx_queues; i++) {
2212 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2213 if (err) {
2214 dev_err(&adapter->pdev->dev,
2215 "Allocation for Rx Queue %u failed\n", i);
2216 for (i--; i >= 0; i--)
2217 igb_free_rx_resources(&adapter->rx_ring[i]);
2226 * igb_setup_rctl - configure the receive control registers
2227 * @adapter: Board private structure
2229 static void igb_setup_rctl(struct igb_adapter *adapter)
2231 struct e1000_hw *hw = &adapter->hw;
2234 rctl = rd32(E1000_RCTL);
2236 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2237 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2239 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2240 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2243 * enable stripping of CRC. It's unlikely this will break BMC
2244 * redirection as it did with e1000. Newer features require
2245 * that the HW strips the CRC.
2247 rctl |= E1000_RCTL_SECRC;
2250 * disable store bad packets and clear size bits.
2252 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2254 /* enable LPE to prevent packets larger than max_frame_size */
2255 rctl |= E1000_RCTL_LPE;
2257 /* disable queue 0 to prevent tail write w/o re-config */
2258 wr32(E1000_RXDCTL(0), 0);
2260 /* Attention!!! For SR-IOV PF driver operations you must enable
2261 * queue drop for all VF and PF queues to prevent head-of-line blocking
2262 * if an untrusted VF does not provide descriptors to hardware.
2264 if (adapter->vfs_allocated_count) {
2267 /* set all queue drop enable bits */
2268 wr32(E1000_QDE, ALL_QUEUES);
2270 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
2271 if (rctl & E1000_RCTL_LPE)
2272 vmolr |= E1000_VMOLR_LPE;
2273 if (adapter->num_rx_queues > 1)
2274 vmolr |= E1000_VMOLR_RSSE;
2275 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2278 wr32(E1000_RCTL, rctl);
2282 * igb_rlpml_set - set maximum receive packet size
2283 * @adapter: board private structure
2285 * Configure maximum receivable packet size.
2287 static void igb_rlpml_set(struct igb_adapter *adapter)
2289 u32 max_frame_size = adapter->max_frame_size;
2290 struct e1000_hw *hw = &adapter->hw;
2291 u16 pf_id = adapter->vfs_allocated_count;
2293 if (adapter->vlgrp)
2294 max_frame_size += VLAN_TAG_SIZE;
2296 /* if vfs are enabled we set RLPML to the largest possible request
2297 * size and set the VMOLR RLPML to the size we need */
2298 if (pf_id) {
2299 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2300 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2301 }
2303 wr32(E1000_RLPML, max_frame_size);
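/* Editor's illustrative sketch (not upstream code): for a standard 1500-byte
 * MTU, max_frame_size is 1500 + 14 (Ethernet header) + 4 (FCS) = 1518, and
 * the VLAN_TAG_SIZE adjustment above raises the programmed limit to 1522
 * bytes when VLANs are in use.
 */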
2307 * igb_configure_vt_default_pool - Configure VT default pool
2308 * @adapter: board private structure
2310 * Configure the default pool
2312 static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2314 struct e1000_hw *hw = &adapter->hw;
2315 u16 pf_id = adapter->vfs_allocated_count;
2318 /* not in sr-iov mode - do nothing */
2319 if (!pf_id)
2320 return;
2322 vtctl = rd32(E1000_VT_CTL);
2323 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2324 E1000_VT_CTL_DISABLE_DEF_POOL);
2325 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2326 wr32(E1000_VT_CTL, vtctl);
2330 * igb_configure_rx_ring - Configure a receive ring after Reset
2331 * @adapter: board private structure
2332 * @ring: receive ring to be configured
2334 * Configure the Rx unit of the MAC after a reset.
2336 static void igb_configure_rx_ring(struct igb_adapter *adapter,
2337 struct igb_ring *ring)
2339 struct e1000_hw *hw = &adapter->hw;
2340 u64 rdba = ring->dma;
2341 int reg_idx = ring->reg_idx;
2344 /* disable the queue */
2345 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2346 wr32(E1000_RXDCTL(reg_idx),
2347 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2349 /* Set DMA base address registers */
2350 wr32(E1000_RDBAL(reg_idx),
2351 rdba & 0x00000000ffffffffULL);
2352 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2353 wr32(E1000_RDLEN(reg_idx),
2354 ring->count * sizeof(union e1000_adv_rx_desc));
2356 /* initialize head and tail */
2357 ring->head = E1000_RDH(reg_idx);
2358 ring->tail = E1000_RDT(reg_idx);
2359 writel(0, hw->hw_addr + ring->head);
2360 writel(0, hw->hw_addr + ring->tail);
2362 /* set descriptor configuration */
2363 if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
2364 srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
2365 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2366 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2367 srrctl |= IGB_RXBUFFER_16384 >>
2368 E1000_SRRCTL_BSIZEPKT_SHIFT;
2369 #else
2370 srrctl |= (PAGE_SIZE / 2) >>
2371 E1000_SRRCTL_BSIZEPKT_SHIFT;
2372 #endif
2373 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2374 } else {
2375 srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
2376 E1000_SRRCTL_BSIZEPKT_SHIFT;
2377 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2380 wr32(E1000_SRRCTL(reg_idx), srrctl);
2382 /* enable receive descriptor fetching */
2383 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2384 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2385 rxdctl &= 0xFFF00000;
2386 rxdctl |= IGB_RX_PTHRESH;
2387 rxdctl |= IGB_RX_HTHRESH << 8;
2388 rxdctl |= IGB_RX_WTHRESH << 16;
2389 wr32(E1000_RXDCTL(reg_idx), rxdctl);
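/* Editor's illustrative sketch (not upstream code): RXDCTL packs the
 * prefetch, host and write-back thresholds into bytes 0, 1 and 2 of the
 * register. Assuming (hypothetically) PTHRESH = 8, HTHRESH = 8 and
 * WTHRESH = 1:
 *
 *	rxdctl |= 8;		// bits 4:0
 *	rxdctl |= 8 << 8;	// bits 12:8
 *	rxdctl |= 1 << 16;	// bits 20:16
 *
 * gives 0x00010808 before the queue-enable bit is OR'ed back in.
 */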
2393 * igb_configure_rx - Configure receive Unit after Reset
2394 * @adapter: board private structure
2396 * Configure the Rx unit of the MAC after a reset.
2398 static void igb_configure_rx(struct igb_adapter *adapter)
2400 struct e1000_hw *hw = &adapter->hw;
2404 /* disable receives while setting up the descriptors */
2405 rctl = rd32(E1000_RCTL);
2406 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2410 if (adapter->itr_setting > 3)
2411 wr32(E1000_ITR, adapter->itr);
2413 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2414 * the Base and Length of the Rx Descriptor Ring */
2415 for (i = 0; i < adapter->num_rx_queues; i++)
2416 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2418 if (adapter->num_rx_queues > 1) {
2427 get_random_bytes(&random[0], 40);
2429 if (hw->mac.type >= e1000_82576)
2430 shift = 0;
2431 else
2432 shift = 6;
2433 for (j = 0; j < (32 * 4); j++) {
2434 reta.bytes[j & 3] =
2435 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2436 if ((j & 3) == 3)
2437 writel(reta.dword,
2438 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2440 if (adapter->vfs_allocated_count)
2441 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2442 else
2443 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2445 /* Fill out hash function seeds */
2446 for (j = 0; j < 10; j++)
2447 array_wr32(E1000_RSSRK(0), j, random[j]);
2449 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2450 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2451 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2452 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2453 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2454 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2455 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2456 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2458 wr32(E1000_MRQC, mrqc);
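/* Editor's illustrative sketch (not upstream code): the RETA programmed
 * above is a 128-entry indirection table written four entries per register.
 * With num_rx_queues == 4, entry j holds (j % 4), so a flow whose RSS hash
 * has low seven bits equal to 6 lands in entry 6 and is steered to
 * queue 6 % 4 == 2.
 */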
2459 } else if (adapter->vfs_allocated_count) {
2460 /* Enable multi-queue for sr-iov */
2461 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2464 /* Enable Receive Checksum Offload for TCP and UDP */
2465 rxcsum = rd32(E1000_RXCSUM);
2466 /* Disable raw packet checksumming */
2467 rxcsum |= E1000_RXCSUM_PCSD;
2469 if (adapter->hw.mac.type == e1000_82576)
2470 /* Enable Receive Checksum Offload for SCTP */
2471 rxcsum |= E1000_RXCSUM_CRCOFL;
2473 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2474 wr32(E1000_RXCSUM, rxcsum);
2476 /* Set the default pool for the PF's first queue */
2477 igb_configure_vt_default_pool(adapter);
2479 /* set UTA to appropriate mode */
2480 igb_set_uta(adapter);
2482 /* set the correct pool for the PF default MAC address in entry 0 */
2483 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2484 adapter->vfs_allocated_count);
2486 igb_rlpml_set(adapter);
2488 /* Enable Receives */
2489 wr32(E1000_RCTL, rctl);
2493 * igb_free_tx_resources - Free Tx Resources per Queue
2494 * @tx_ring: Tx descriptor ring for a specific queue
2496 * Free all transmit software resources
2498 void igb_free_tx_resources(struct igb_ring *tx_ring)
2500 struct pci_dev *pdev = tx_ring->q_vector->adapter->pdev;
2502 igb_clean_tx_ring(tx_ring);
2504 vfree(tx_ring->buffer_info);
2505 tx_ring->buffer_info = NULL;
2507 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2509 tx_ring->desc = NULL;
2513 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2514 * @adapter: board private structure
2516 * Free all transmit software resources
2518 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2522 for (i = 0; i < adapter->num_tx_queues; i++)
2523 igb_free_tx_resources(&adapter->tx_ring[i]);
2526 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2527 struct igb_buffer *buffer_info)
2529 buffer_info->dma = 0;
2530 if (buffer_info->skb) {
2531 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
2532 DMA_TO_DEVICE);
2533 dev_kfree_skb_any(buffer_info->skb);
2534 buffer_info->skb = NULL;
2536 buffer_info->time_stamp = 0;
2537 /* buffer_info must be completely set up in the transmit path */
2541 * igb_clean_tx_ring - Free Tx Buffers
2542 * @tx_ring: ring to be cleaned
2544 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2546 struct igb_adapter *adapter = tx_ring->q_vector->adapter;
2547 struct igb_buffer *buffer_info;
2551 if (!tx_ring->buffer_info)
2552 return;
2553 /* Free all the Tx ring sk_buffs */
2555 for (i = 0; i < tx_ring->count; i++) {
2556 buffer_info = &tx_ring->buffer_info[i];
2557 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2560 size = sizeof(struct igb_buffer) * tx_ring->count;
2561 memset(tx_ring->buffer_info, 0, size);
2563 /* Zero out the descriptor ring */
2565 memset(tx_ring->desc, 0, tx_ring->size);
2567 tx_ring->next_to_use = 0;
2568 tx_ring->next_to_clean = 0;
2570 writel(0, adapter->hw.hw_addr + tx_ring->head);
2571 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2575 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2576 * @adapter: board private structure
2578 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2582 for (i = 0; i < adapter->num_tx_queues; i++)
2583 igb_clean_tx_ring(&adapter->tx_ring[i]);
2587 * igb_free_rx_resources - Free Rx Resources
2588 * @rx_ring: ring to clean the resources from
2590 * Free all receive software resources
2592 void igb_free_rx_resources(struct igb_ring *rx_ring)
2594 struct pci_dev *pdev = rx_ring->q_vector->adapter->pdev;
2596 igb_clean_rx_ring(rx_ring);
2598 vfree(rx_ring->buffer_info);
2599 rx_ring->buffer_info = NULL;
2601 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2603 rx_ring->desc = NULL;
2607 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2608 * @adapter: board private structure
2610 * Free all receive software resources
2612 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2616 for (i = 0; i < adapter->num_rx_queues; i++)
2617 igb_free_rx_resources(&adapter->rx_ring[i]);
2621 * igb_clean_rx_ring - Free Rx Buffers per Queue
2622 * @rx_ring: ring to free buffers from
2624 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2626 struct igb_adapter *adapter = rx_ring->q_vector->adapter;
2627 struct igb_buffer *buffer_info;
2628 struct pci_dev *pdev = adapter->pdev;
2632 if (!rx_ring->buffer_info)
2633 return;
2634 /* Free all the Rx ring sk_buffs */
2635 for (i = 0; i < rx_ring->count; i++) {
2636 buffer_info = &rx_ring->buffer_info[i];
2637 if (buffer_info->dma) {
2638 pci_unmap_single(pdev, buffer_info->dma,
2639 adapter->rx_buffer_len,
2640 PCI_DMA_FROMDEVICE);
2641 buffer_info->dma = 0;
2644 if (buffer_info->skb) {
2645 dev_kfree_skb(buffer_info->skb);
2646 buffer_info->skb = NULL;
2648 if (buffer_info->page_dma) {
2649 pci_unmap_page(pdev, buffer_info->page_dma,
2650 PAGE_SIZE / 2,
2651 PCI_DMA_FROMDEVICE);
2652 buffer_info->page_dma = 0;
2654 if (buffer_info->page) {
2655 put_page(buffer_info->page);
2656 buffer_info->page = NULL;
2657 buffer_info->page_offset = 0;
2661 size = sizeof(struct igb_buffer) * rx_ring->count;
2662 memset(rx_ring->buffer_info, 0, size);
2664 /* Zero out the descriptor ring */
2665 memset(rx_ring->desc, 0, rx_ring->size);
2667 rx_ring->next_to_clean = 0;
2668 rx_ring->next_to_use = 0;
2670 writel(0, adapter->hw.hw_addr + rx_ring->head);
2671 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2675 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2676 * @adapter: board private structure
2678 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2682 for (i = 0; i < adapter->num_rx_queues; i++)
2683 igb_clean_rx_ring(&adapter->rx_ring[i]);
2687 * igb_set_mac - Change the Ethernet Address of the NIC
2688 * @netdev: network interface device structure
2689 * @p: pointer to an address structure
2691 * Returns 0 on success, negative on failure
2693 static int igb_set_mac(struct net_device *netdev, void *p)
2695 struct igb_adapter *adapter = netdev_priv(netdev);
2696 struct e1000_hw *hw = &adapter->hw;
2697 struct sockaddr *addr = p;
2699 if (!is_valid_ether_addr(addr->sa_data))
2700 return -EADDRNOTAVAIL;
2702 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2703 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2705 /* set the correct pool for the new PF MAC address in entry 0 */
2706 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2707 adapter->vfs_allocated_count);
2713 * igb_write_mc_addr_list - write multicast addresses to MTA
2714 * @netdev: network interface device structure
2716 * Writes multicast address list to the MTA hash table.
2717 * Returns: -ENOMEM on failure
2718 * 0 on no addresses written
2719 * X on writing X addresses to MTA
2721 static int igb_write_mc_addr_list(struct net_device *netdev)
2723 struct igb_adapter *adapter = netdev_priv(netdev);
2724 struct e1000_hw *hw = &adapter->hw;
2725 struct dev_mc_list *mc_ptr = netdev->mc_list;
2730 if (!netdev->mc_count) {
2731 /* nothing to program, so clear mc list */
2732 igb_update_mc_addr_list(hw, NULL, 0);
2733 igb_restore_vf_multicasts(adapter);
2734 return 0;
2735 }
2737 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2738 if (!mta_list)
2739 return -ENOMEM;
2741 /* set vmolr receive overflow multicast bit */
2742 vmolr |= E1000_VMOLR_ROMPE;
2744 /* The shared function expects a packed array of only addresses. */
2745 mc_ptr = netdev->mc_list;
2747 for (i = 0; i < netdev->mc_count; i++) {
2750 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2751 mc_ptr = mc_ptr->next;
2753 igb_update_mc_addr_list(hw, mta_list, i);
2754 kfree(mta_list);
2756 return netdev->mc_count;
2760 * igb_write_uc_addr_list - write unicast addresses to RAR table
2761 * @netdev: network interface device structure
2763 * Writes unicast address list to the RAR table.
2764 * Returns: -ENOMEM on failure/insufficient address space
2765 * 0 on no addresses written
2766 * X on writing X addresses to the RAR table
2768 static int igb_write_uc_addr_list(struct net_device *netdev)
2770 struct igb_adapter *adapter = netdev_priv(netdev);
2771 struct e1000_hw *hw = &adapter->hw;
2772 unsigned int vfn = adapter->vfs_allocated_count;
2773 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2776 /* return ENOMEM indicating insufficient memory for addresses */
2777 if (netdev->uc.count > rar_entries)
2778 return -ENOMEM;
2780 if (netdev->uc.count && rar_entries) {
2781 struct netdev_hw_addr *ha;
2782 list_for_each_entry(ha, &netdev->uc.list, list) {
2785 igb_rar_set_qsel(adapter, ha->addr,
2786 rar_entries--,
2787 vfn);
2791 /* write the addresses in reverse order to avoid write combining */
2792 for (; rar_entries > 0 ; rar_entries--) {
2793 wr32(E1000_RAH(rar_entries), 0);
2794 wr32(E1000_RAL(rar_entries), 0);
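/* Editor's illustrative sketch (not upstream code): on an 82576 with 24
 * receive-address (RAR) slots, seven VFs each holding one slot plus one
 * slot for the PF default MAC leave 24 - (7 + 1) = 16 entries for extra
 * unicast filters before unicast promiscuous mode becomes the fallback.
 */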
2802 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2803 * @netdev: network interface device structure
2805 * The set_rx_mode entry point is called whenever the unicast or multicast
2806 * address lists or the network interface flags are updated. This routine is
2807 * responsible for configuring the hardware for proper unicast, multicast,
2808 * promiscuous mode, and all-multi behavior.
2810 static void igb_set_rx_mode(struct net_device *netdev)
2812 struct igb_adapter *adapter = netdev_priv(netdev);
2813 struct e1000_hw *hw = &adapter->hw;
2814 unsigned int vfn = adapter->vfs_allocated_count;
2815 u32 rctl, vmolr = 0;
2818 /* Check for Promiscuous and All Multicast modes */
2819 rctl = rd32(E1000_RCTL);
2821 /* clear the affected bits */
2822 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2824 if (netdev->flags & IFF_PROMISC) {
2825 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2826 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2828 if (netdev->flags & IFF_ALLMULTI) {
2829 rctl |= E1000_RCTL_MPE;
2830 vmolr |= E1000_VMOLR_MPME;
2833 * Write addresses to the MTA; if the attempt fails
2834 * then we should just turn on promiscuous mode so
2835 * that we can at least receive multicast traffic
2837 count = igb_write_mc_addr_list(netdev);
2838 if (count < 0) {
2839 rctl |= E1000_RCTL_MPE;
2840 vmolr |= E1000_VMOLR_MPME;
2841 } else if (count) {
2842 vmolr |= E1000_VMOLR_ROMPE;
2846 * Write addresses to available RAR registers; if there is not
2847 * sufficient space to store all the addresses then enable
2848 * unicast promiscuous mode
2850 count = igb_write_uc_addr_list(netdev);
2851 if (count < 0) {
2852 rctl |= E1000_RCTL_UPE;
2853 vmolr |= E1000_VMOLR_ROPE;
2855 rctl |= E1000_RCTL_VFE;
2857 wr32(E1000_RCTL, rctl);
2860 * In order to support SR-IOV and eventually VMDq it is necessary to set
2861 * the VMOLR to enable the appropriate modes. Without this workaround
2862 * we will have issues with VLAN tag stripping not being done for frames
2863 * that are only arriving because we are the default pool
2865 if (hw->mac.type < e1000_82576)
2866 return;
2868 vmolr |= rd32(E1000_VMOLR(vfn)) &
2869 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2870 wr32(E1000_VMOLR(vfn), vmolr);
2871 igb_restore_vf_multicasts(adapter);
2874 /* Need to wait a few seconds after link up to get diagnostic
2875 * information from the phy */
2876 static void igb_update_phy_info(unsigned long data)
2878 struct igb_adapter *adapter = (struct igb_adapter *) data;
2879 igb_get_phy_info(&adapter->hw);
2883 * igb_has_link - check shared code for link and determine up/down
2884 * @adapter: pointer to driver private info
2886 static bool igb_has_link(struct igb_adapter *adapter)
2888 struct e1000_hw *hw = &adapter->hw;
2889 bool link_active = false;
2892 /* get_link_status is set on LSC (link status) interrupt or
2893 * rx sequence error interrupt. get_link_status will stay
2894 * false until the e1000_check_for_link establishes link
2895 * for copper adapters ONLY
2897 switch (hw->phy.media_type) {
2898 case e1000_media_type_copper:
2899 if (hw->mac.get_link_status) {
2900 ret_val = hw->mac.ops.check_for_link(hw);
2901 link_active = !hw->mac.get_link_status;
2902 } else {
2903 link_active = true;
2904 }
2905 break;
2906 case e1000_media_type_internal_serdes:
2907 ret_val = hw->mac.ops.check_for_link(hw);
2908 link_active = hw->mac.serdes_has_link;
2909 break;
2911 case e1000_media_type_unknown:
2919 * igb_watchdog - Timer Call-back
2920 * @data: pointer to adapter cast into an unsigned long
2922 static void igb_watchdog(unsigned long data)
2924 struct igb_adapter *adapter = (struct igb_adapter *)data;
2925 /* Do the rest outside of interrupt context */
2926 schedule_work(&adapter->watchdog_task);
2929 static void igb_watchdog_task(struct work_struct *work)
2931 struct igb_adapter *adapter = container_of(work,
2932 struct igb_adapter, watchdog_task);
2933 struct e1000_hw *hw = &adapter->hw;
2934 struct net_device *netdev = adapter->netdev;
2935 struct igb_ring *tx_ring = adapter->tx_ring;
2939 link = igb_has_link(adapter);
2940 if ((netif_carrier_ok(netdev)) && link)
2941 goto link_up;
2944 if (!netif_carrier_ok(netdev)) {
2946 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2947 &adapter->link_speed,
2948 &adapter->link_duplex);
2950 ctrl = rd32(E1000_CTRL);
2951 /* Link status message must follow this format */
2952 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2953 "Flow Control: %s\n",
2954 netdev->name,
2955 adapter->link_speed,
2956 adapter->link_duplex == FULL_DUPLEX ?
2957 "Full Duplex" : "Half Duplex",
2958 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2959 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2960 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2961 E1000_CTRL_TFCE) ? "TX" : "None")));
2963 /* tweak tx_queue_len according to speed/duplex and
2964 * adjust the timeout factor */
2965 netdev->tx_queue_len = adapter->tx_queue_len;
2966 adapter->tx_timeout_factor = 1;
2967 switch (adapter->link_speed) {
2968 case SPEED_10:
2969 netdev->tx_queue_len = 10;
2970 adapter->tx_timeout_factor = 14;
2971 break;
2972 case SPEED_100:
2973 netdev->tx_queue_len = 100;
2974 /* maybe add some timeout factor ? */
2978 netif_carrier_on(netdev);
2980 igb_ping_all_vfs(adapter);
2982 /* link state has changed, schedule phy info update */
2983 if (!test_bit(__IGB_DOWN, &adapter->state))
2984 mod_timer(&adapter->phy_info_timer,
2985 round_jiffies(jiffies + 2 * HZ));
2988 if (netif_carrier_ok(netdev)) {
2989 adapter->link_speed = 0;
2990 adapter->link_duplex = 0;
2991 /* Link status message must follow this format */
2992 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2993 netdev->name);
2994 netif_carrier_off(netdev);
2996 igb_ping_all_vfs(adapter);
2998 /* link state has changed, schedule phy info update */
2999 if (!test_bit(__IGB_DOWN, &adapter->state))
3000 mod_timer(&adapter->phy_info_timer,
3001 round_jiffies(jiffies + 2 * HZ));
3003 link_up:
3006 igb_update_stats(adapter);
3008 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
3009 adapter->tpt_old = adapter->stats.tpt;
3010 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
3011 adapter->colc_old = adapter->stats.colc;
3013 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
3014 adapter->gorc_old = adapter->stats.gorc;
3015 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
3016 adapter->gotc_old = adapter->stats.gotc;
3018 igb_update_adaptive(&adapter->hw);
3020 if (!netif_carrier_ok(netdev)) {
3021 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3022 /* We've lost link, so the controller stops DMA,
3023 * but we've got queued Tx work that's never going
3024 * to get done, so reset controller to flush Tx.
3025 * (Do the reset outside of interrupt context). */
3026 adapter->tx_timeout_count++;
3027 schedule_work(&adapter->reset_task);
3028 /* return immediately since reset is imminent */
3029 return;
3033 /* Cause software interrupt to ensure rx ring is cleaned */
3034 if (adapter->msix_entries) {
3036 for (i = 0; i < adapter->num_q_vectors; i++) {
3037 struct igb_q_vector *q_vector = adapter->q_vector[i];
3038 eics |= q_vector->eims_value;
3040 wr32(E1000_EICS, eics);
3042 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3045 /* Force detection of hung controller every watchdog period */
3046 tx_ring->detect_tx_hung = true;
3048 /* Reset the timer */
3049 if (!test_bit(__IGB_DOWN, &adapter->state))
3050 mod_timer(&adapter->watchdog_timer,
3051 round_jiffies(jiffies + 2 * HZ));
3054 enum latency_range {
3055 lowest_latency = 0,
3056 low_latency = 1,
3057 bulk_latency = 2,
3058 latency_invalid = 255
3059 };
3063 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3065 * Stores a new ITR value based strictly on packet size. This
3066 * algorithm is less sophisticated than that used in igb_update_itr,
3067 * due to the difficulty of synchronizing statistics across multiple
3068 * receive rings. The divisors and thresholds used by this function
3069 * were determined based on theoretical maximum wire speed and testing
3070 * data, in order to minimize response time while increasing bulk
3071 * throughput.
3072 * This functionality is controlled by the InterruptThrottleRate module
3073 * parameter (see igb_param.c)
3074 * NOTE: This function is called only when operating in a multiqueue
3075 * receive environment.
3076 * @q_vector: pointer to q_vector
3078 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3080 int new_val = q_vector->itr_val;
3081 int avg_wire_size = 0;
3082 struct igb_adapter *adapter = q_vector->adapter;
3084 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3085 * ints/sec - ITR timer value of 120 ticks.
3087 if (adapter->link_speed != SPEED_1000) {
3088 new_val = 120;
3089 goto set_itr_val;
3090 }
3092 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3093 struct igb_ring *ring = q_vector->rx_ring;
3094 avg_wire_size = ring->total_bytes / ring->total_packets;
3097 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3098 struct igb_ring *ring = q_vector->tx_ring;
3099 avg_wire_size = max_t(u32, avg_wire_size,
3100 (ring->total_bytes /
3101 ring->total_packets));
3104 /* if avg_wire_size isn't set no work was done */
3105 if (!avg_wire_size)
3106 goto clear_counts;
3108 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3109 avg_wire_size += 24;
3111 /* Don't starve jumbo frames */
3112 avg_wire_size = min(avg_wire_size, 3000);
3114 /* Give a little boost to mid-size frames */
3115 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3116 new_val = avg_wire_size / 3;
3117 else
3118 new_val = avg_wire_size / 2;
3120 set_itr_val:
3121 if (new_val != q_vector->itr_val) {
3122 q_vector->itr_val = new_val;
3123 q_vector->set_itr = 1;
3125 clear_counts:
3126 if (q_vector->rx_ring) {
3127 q_vector->rx_ring->total_bytes = 0;
3128 q_vector->rx_ring->total_packets = 0;
3130 if (q_vector->tx_ring) {
3131 q_vector->tx_ring->total_bytes = 0;
3132 q_vector->tx_ring->total_packets = 0;
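/* Editor's illustrative sketch (not upstream code): for a stream of
 * 1500-byte frames, avg_wire_size = 1500 + 24 = 1524, which lies outside
 * the 300..1200 boost window, so new_val = 1524 / 2 = 762 (a long interval
 * suited to bulk traffic). A 600-byte average (624 with overhead) takes
 * the divide-by-three path instead and yields 208.
 */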
3137 * igb_update_itr - update the dynamic ITR value based on statistics
3138 * Stores a new ITR value based on packets and byte
3139 * counts during the last interrupt. The advantage of per-interrupt
3140 * computation is faster updates and more accurate ITR for the current
3141 * traffic pattern. Constants in this function were computed
3142 * based on theoretical maximum wire speed and thresholds were set based
3143 * on testing data as well as attempting to minimize response time
3144 * while increasing bulk throughput.
3145 * This functionality is controlled by the InterruptThrottleRate module
3146 * parameter (see igb_param.c)
3147 * NOTE: These calculations are only valid when operating in a single-
3148 * queue environment.
3149 * @adapter: pointer to adapter
3150 * @itr_setting: current q_vector->itr_val
3151 * @packets: the number of packets during this measurement interval
3152 * @bytes: the number of bytes during this measurement interval
3154 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3155 int packets, int bytes)
3157 unsigned int retval = itr_setting;
3159 if (packets == 0)
3160 goto update_itr_done;
3162 switch (itr_setting) {
3163 case lowest_latency:
3164 /* handle TSO and jumbo frames */
3165 if (bytes/packets > 8000)
3166 retval = bulk_latency;
3167 else if ((packets < 5) && (bytes > 512))
3168 retval = low_latency;
3170 case low_latency: /* 50 usec aka 20000 ints/s */
3171 if (bytes > 10000) {
3172 /* this if handles the TSO accounting */
3173 if (bytes/packets > 8000) {
3174 retval = bulk_latency;
3175 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3176 retval = bulk_latency;
3177 } else if ((packets > 35)) {
3178 retval = lowest_latency;
3180 } else if (bytes/packets > 2000) {
3181 retval = bulk_latency;
3182 } else if (packets <= 2 && bytes < 512) {
3183 retval = lowest_latency;
3186 case bulk_latency: /* 250 usec aka 4000 ints/s */
3187 if (bytes > 25000) {
3188 if (packets > 35)
3189 retval = low_latency;
3190 } else if (bytes < 1500) {
3191 retval = low_latency;
3200 static void igb_set_itr(struct igb_adapter *adapter)
3202 struct igb_q_vector *q_vector = adapter->q_vector[0];
3204 u32 new_itr = q_vector->itr_val;
3206 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3207 if (adapter->link_speed != SPEED_1000) {
3208 current_itr = 0;
3209 new_itr = 4000;
3210 goto set_itr_now;
3211 }
3213 adapter->rx_itr = igb_update_itr(adapter,
3214 adapter->rx_itr,
3215 adapter->rx_ring->total_packets,
3216 adapter->rx_ring->total_bytes);
3218 adapter->tx_itr = igb_update_itr(adapter,
3219 adapter->tx_itr,
3220 adapter->tx_ring->total_packets,
3221 adapter->tx_ring->total_bytes);
3222 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3224 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3225 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
3226 current_itr = low_latency;
3228 switch (current_itr) {
3229 /* counts and packets in update_itr are dependent on these numbers */
3230 case lowest_latency:
3231 new_itr = 56; /* aka 70,000 ints/sec */
3232 break;
3233 case low_latency:
3234 new_itr = 196; /* aka 20,000 ints/sec */
3235 break;
3236 case bulk_latency:
3237 new_itr = 980; /* aka 4,000 ints/sec */
3244 adapter->rx_ring->total_bytes = 0;
3245 adapter->rx_ring->total_packets = 0;
3246 adapter->tx_ring->total_bytes = 0;
3247 adapter->tx_ring->total_packets = 0;
3248 set_itr_now:
3249 if (new_itr != q_vector->itr_val) {
3250 /* this attempts to bias the interrupt rate towards Bulk
3251 * by adding intermediate steps when interrupt rate is
3252 * increasing */
3253 new_itr = new_itr > q_vector->itr_val ?
3254 max((new_itr * q_vector->itr_val) /
3255 (new_itr + (q_vector->itr_val >> 2)),
3258 /* Don't write the value here; it resets the adapter's
3259 * internal timer, and causes us to delay far longer than
3260 * we should between interrupts. Instead, we write the ITR
3261 * value at the beginning of the next interrupt so the timing
3262 * ends up being correct.
3264 q_vector->itr_val = new_itr;
3265 q_vector->set_itr = 1;
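/* Editor's illustrative sketch (not upstream code): the ITR interval is
 * programmed in roughly 256 ns units, so the constants above work out to
 *
 *	 56 * 256 ns ~=  14 us -> ~70,000 ints/sec (lowest_latency)
 *	196 * 256 ns ~=  50 us -> ~20,000 ints/sec (low_latency)
 *	980 * 256 ns ~= 250 us -> ~4,000 ints/sec  (bulk_latency)
 */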
3271 #define IGB_TX_FLAGS_CSUM 0x00000001
3272 #define IGB_TX_FLAGS_VLAN 0x00000002
3273 #define IGB_TX_FLAGS_TSO 0x00000004
3274 #define IGB_TX_FLAGS_IPV4 0x00000008
3275 #define IGB_TX_FLAGS_TSTAMP 0x00000010
3276 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3277 #define IGB_TX_FLAGS_VLAN_SHIFT 16
3279 static inline int igb_tso_adv(struct igb_adapter *adapter,
3280 struct igb_ring *tx_ring,
3281 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3283 struct e1000_adv_tx_context_desc *context_desc;
3286 struct igb_buffer *buffer_info;
3287 u32 info = 0, tu_cmd = 0;
3288 u32 mss_l4len_idx, l4len;
3291 if (skb_header_cloned(skb)) {
3292 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3293 if (err)
3294 return err;
3295 }
3297 l4len = tcp_hdrlen(skb);
3300 if (skb->protocol == htons(ETH_P_IP)) {
3301 struct iphdr *iph = ip_hdr(skb);
3304 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3305 iph->daddr, 0,
3306 IPPROTO_TCP,
3307 0);
3308 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3309 ipv6_hdr(skb)->payload_len = 0;
3310 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3311 &ipv6_hdr(skb)->daddr,
3312 0, IPPROTO_TCP, 0);
3315 i = tx_ring->next_to_use;
3317 buffer_info = &tx_ring->buffer_info[i];
3318 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3319 /* VLAN MACLEN IPLEN */
3320 if (tx_flags & IGB_TX_FLAGS_VLAN)
3321 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3322 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3323 *hdr_len += skb_network_offset(skb);
3324 info |= skb_network_header_len(skb);
3325 *hdr_len += skb_network_header_len(skb);
3326 context_desc->vlan_macip_lens = cpu_to_le32(info);
3328 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3329 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3331 if (skb->protocol == htons(ETH_P_IP))
3332 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3333 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3335 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3338 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3339 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3341 /* For 82575, context index must be unique per ring. */
3342 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3343 mss_l4len_idx |= tx_ring->queue_index << 4;
3345 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3346 context_desc->seqnum_seed = 0;
3348 buffer_info->time_stamp = jiffies;
3349 buffer_info->next_to_watch = i;
3350 buffer_info->dma = 0;
3351 i++;
3352 if (i == tx_ring->count)
3353 i = 0;
3355 tx_ring->next_to_use = i;
3360 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3361 struct igb_ring *tx_ring,
3362 struct sk_buff *skb, u32 tx_flags)
3364 struct e1000_adv_tx_context_desc *context_desc;
3366 struct igb_buffer *buffer_info;
3367 u32 info = 0, tu_cmd = 0;
3369 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3370 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3371 i = tx_ring->next_to_use;
3372 buffer_info = &tx_ring->buffer_info[i];
3373 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3375 if (tx_flags & IGB_TX_FLAGS_VLAN)
3376 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3377 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3378 if (skb->ip_summed == CHECKSUM_PARTIAL)
3379 info |= skb_network_header_len(skb);
3381 context_desc->vlan_macip_lens = cpu_to_le32(info);
3383 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3385 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3388 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3389 const struct vlan_ethhdr *vhdr =
3390 (const struct vlan_ethhdr*)skb->data;
3392 protocol = vhdr->h_vlan_encapsulated_proto;
3393 } else {
3394 protocol = skb->protocol;
3395 }
3397 switch (protocol) {
3398 case cpu_to_be16(ETH_P_IP):
3399 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3400 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3401 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3402 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3403 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3405 case cpu_to_be16(ETH_P_IPV6):
3406 /* XXX what about other V6 headers?? */
3407 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3408 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3409 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3410 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3411 break;
3412 default:
3413 if (unlikely(net_ratelimit()))
3414 dev_warn(&adapter->pdev->dev,
3415 "partial checksum but proto=%x!\n",
3416 skb->protocol);
3421 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3422 context_desc->seqnum_seed = 0;
3423 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3424 context_desc->mss_l4len_idx =
3425 cpu_to_le32(tx_ring->queue_index << 4);
3427 context_desc->mss_l4len_idx = 0;
3429 buffer_info->time_stamp = jiffies;
3430 buffer_info->next_to_watch = i;
3431 buffer_info->dma = 0;
3433 i++;
3434 if (i == tx_ring->count)
3435 i = 0;
3436 tx_ring->next_to_use = i;
3443 #define IGB_MAX_TXD_PWR 16
3444 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3446 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3447 struct igb_ring *tx_ring, struct sk_buff *skb,
3450 struct igb_buffer *buffer_info;
3451 unsigned int len = skb_headlen(skb);
3452 unsigned int count = 0, i;
3456 i = tx_ring->next_to_use;
3458 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
3459 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
3460 return 0;
3461 }
3463 map = skb_shinfo(skb)->dma_maps;
3465 buffer_info = &tx_ring->buffer_info[i];
3466 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3467 buffer_info->length = len;
3468 /* set time_stamp *before* dma to help avoid a possible race */
3469 buffer_info->time_stamp = jiffies;
3470 buffer_info->next_to_watch = i;
3471 buffer_info->dma = skb_shinfo(skb)->dma_head;
3473 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3474 struct skb_frag_struct *frag;
3476 i++;
3477 if (i == tx_ring->count)
3478 i = 0;
3480 frag = &skb_shinfo(skb)->frags[f];
3481 len = frag->size;
3483 buffer_info = &tx_ring->buffer_info[i];
3484 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3485 buffer_info->length = len;
3486 buffer_info->time_stamp = jiffies;
3487 buffer_info->next_to_watch = i;
3488 buffer_info->dma = map[count];
3489 count++;
3492 tx_ring->buffer_info[i].skb = skb;
3493 tx_ring->buffer_info[first].next_to_watch = i;
3498 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3499 struct igb_ring *tx_ring,
3500 int tx_flags, int count, u32 paylen,
3501 u8 hdr_len)
3503 union e1000_adv_tx_desc *tx_desc = NULL;
3504 struct igb_buffer *buffer_info;
3505 u32 olinfo_status = 0, cmd_type_len;
3508 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3509 E1000_ADVTXD_DCMD_DEXT);
3511 if (tx_flags & IGB_TX_FLAGS_VLAN)
3512 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3514 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3515 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3517 if (tx_flags & IGB_TX_FLAGS_TSO) {
3518 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3520 /* insert tcp checksum */
3521 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3523 /* insert ip checksum */
3524 if (tx_flags & IGB_TX_FLAGS_IPV4)
3525 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3527 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3528 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3531 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
3532 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
3533 IGB_TX_FLAGS_VLAN)))
3534 olinfo_status |= tx_ring->queue_index << 4;
3536 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3538 i = tx_ring->next_to_use;
3540 buffer_info = &tx_ring->buffer_info[i];
3541 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3542 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3543 tx_desc->read.cmd_type_len =
3544 cpu_to_le32(cmd_type_len | buffer_info->length);
3545 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3546 i++;
3547 if (i == tx_ring->count)
3548 i = 0;
3551 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
3552 /* Force memory writes to complete before letting h/w
3553 * know there are new descriptors to fetch. (Only
3554 * applicable for weak-ordered memory model archs,
3555 * such as IA-64). */
3558 tx_ring->next_to_use = i;
3559 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3560 /* we need this if more than one processor can write to our tail
3561 * at a time, it synchronizes IO on IA64/Altix systems */
3565 static int __igb_maybe_stop_tx(struct net_device *netdev,
3566 struct igb_ring *tx_ring, int size)
3568 struct igb_adapter *adapter = netdev_priv(netdev);
3570 netif_stop_subqueue(netdev, tx_ring->queue_index);
3572 /* Herbert's original patch had:
3573 * smp_mb__after_netif_stop_queue();
3574 * but since that doesn't exist yet, just open code it. */
3577 /* We need to check again in case another CPU has just
3578 * made room available. */
3579 if (igb_desc_unused(tx_ring) < size)
3580 return -EBUSY;
3583 netif_wake_subqueue(netdev, tx_ring->queue_index);
3584 ++adapter->restart_queue;
3588 static int igb_maybe_stop_tx(struct net_device *netdev,
3589 struct igb_ring *tx_ring, int size)
3591 if (igb_desc_unused(tx_ring) >= size)
3592 return 0;
3593 return __igb_maybe_stop_tx(netdev, tx_ring, size);
3596 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3597 struct net_device *netdev,
3598 struct igb_ring *tx_ring)
3600 struct igb_adapter *adapter = netdev_priv(netdev);
3602 unsigned int tx_flags = 0;
3606 union skb_shared_tx *shtx;
3608 if (test_bit(__IGB_DOWN, &adapter->state)) {
3609 dev_kfree_skb_any(skb);
3610 return NETDEV_TX_OK;
3613 if (skb->len <= 0) {
3614 dev_kfree_skb_any(skb);
3615 return NETDEV_TX_OK;
3618 /* need: 1 descriptor per page,
3619 * + 2 desc gap to keep tail from touching head,
3620 * + 1 desc for skb->data,
3621 * + 1 desc for context descriptor,
3622 * otherwise try next time */
3623 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3624 /* this is a hard error */
3625 return NETDEV_TX_BUSY;
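/* Editor's illustrative sketch (not upstream code): for a TSO skb carrying
 * three page fragments, the reservation above asks for 3 + 4 = 7 free
 * descriptors: three for the fragments, one for skb->data, one for the
 * context descriptor, and a two-descriptor gap so the tail never touches
 * the head.
 */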
3629 * TODO: check that there currently is no other packet with
3630 * time stamping in the queue
3632 * When doing time stamping, keep the connection to the socket
3633 * a while longer: it is still needed by skb_hwtstamp_tx(),
3634 * called either in igb_tx_hwtstamp() or by our caller when
3635 * doing software time stamping.
3636 */
3637 shtx = skb_tx(skb);
3638 if (unlikely(shtx->hardware)) {
3639 shtx->in_progress = 1;
3640 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3643 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3644 tx_flags |= IGB_TX_FLAGS_VLAN;
3645 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3648 if (skb->protocol == htons(ETH_P_IP))
3649 tx_flags |= IGB_TX_FLAGS_IPV4;
3651 first = tx_ring->next_to_use;
3652 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
3653 &hdr_len) : 0;
3655 if (tso < 0) {
3656 dev_kfree_skb_any(skb);
3657 return NETDEV_TX_OK;
3658 }
3660 if (tso)
3661 tx_flags |= IGB_TX_FLAGS_TSO;
3662 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3663 (skb->ip_summed == CHECKSUM_PARTIAL))
3664 tx_flags |= IGB_TX_FLAGS_CSUM;
3667 * count reflects descriptors mapped, if 0 then mapping error
3668 * has occurred and we need to rewind the descriptor queue
3670 count = igb_tx_map_adv(adapter, tx_ring, skb, first);
3673 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3674 skb->len, hdr_len);
3675 /* Make sure there is space in the ring for the next send. */
3676 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3678 dev_kfree_skb_any(skb);
3679 tx_ring->buffer_info[first].time_stamp = 0;
3680 tx_ring->next_to_use = first;
3683 return NETDEV_TX_OK;
3686 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3687 struct net_device *netdev)
3689 struct igb_adapter *adapter = netdev_priv(netdev);
3690 struct igb_ring *tx_ring;
3693 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3694 tx_ring = adapter->multi_tx_table[r_idx];
3696 /* This goes back to the question of how to logically map a tx queue
3697 * to a flow. Right now, performance is impacted slightly negatively
3698 * if using multiple tx queues. If the stack breaks away from a
3699 * single qdisc implementation, we can look at this again. */
3700 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
3704 * igb_tx_timeout - Respond to a Tx Hang
3705 * @netdev: network interface device structure
3707 static void igb_tx_timeout(struct net_device *netdev)
3709 struct igb_adapter *adapter = netdev_priv(netdev);
3710 struct e1000_hw *hw = &adapter->hw;
3712 /* Do the reset outside of interrupt context */
3713 adapter->tx_timeout_count++;
3714 schedule_work(&adapter->reset_task);
3715 wr32(E1000_EICS,
3716 (adapter->eims_enable_mask & ~adapter->eims_other));
3719 static void igb_reset_task(struct work_struct *work)
3721 struct igb_adapter *adapter;
3722 adapter = container_of(work, struct igb_adapter, reset_task);
3724 igb_reinit_locked(adapter);
3728 * igb_get_stats - Get System Network Statistics
3729 * @netdev: network interface device structure
3731 * Returns the address of the device statistics structure.
3732 * The statistics are actually updated from the timer callback.
3734 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3736 /* only return the current stats */
3737 return &netdev->stats;
3741 * igb_change_mtu - Change the Maximum Transfer Unit
3742 * @netdev: network interface device structure
3743 * @new_mtu: new value for maximum frame size
3745 * Returns 0 on success, negative on failure
3747 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3749 struct igb_adapter *adapter = netdev_priv(netdev);
3750 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3752 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3753 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3754 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3755 return -EINVAL;
3756 }
3758 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3759 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3760 return -EINVAL;
3761 }
3763 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3764 msleep(1);
3766 /* igb_down has a dependency on max_frame_size */
3767 adapter->max_frame_size = max_frame;
3768 if (netif_running(netdev))
3769 igb_down(adapter);
3771 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3772 * means we reserve 2 more, this pushes us to allocate from the next
3773 * larger slab size.
3774 * i.e. RXBUFFER_2048 --> size-4096 slab
3777 if (max_frame <= IGB_RXBUFFER_1024)
3778 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3779 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3780 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3782 adapter->rx_buffer_len = IGB_RXBUFFER_128;
3784 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3785 netdev->mtu, new_mtu);
3786 netdev->mtu = new_mtu;
3788 if (netif_running(netdev))
3789 igb_up(adapter);
3790 else
3791 igb_reset(adapter);
3793 clear_bit(__IGB_RESETTING, &adapter->state);
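/* Editor's illustrative sketch (not upstream code): for new_mtu = 1500,
 * max_frame = 1500 + 14 + 4 = 1518, which exceeds IGB_RXBUFFER_1024 but
 * fits MAXIMUM_ETHERNET_VLAN_SIZE (1522), so rx_buffer_len becomes 1522.
 * A 9000-byte jumbo MTU instead selects IGB_RXBUFFER_128 and relies on the
 * half-page buffers configured in igb_configure_rx_ring() for the payload.
 */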
3799 * igb_update_stats - Update the board statistics counters
3800 * @adapter: board private structure
3803 void igb_update_stats(struct igb_adapter *adapter)
3805 struct net_device *netdev = adapter->netdev;
3806 struct e1000_hw *hw = &adapter->hw;
3807 struct pci_dev *pdev = adapter->pdev;
3810 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3813 * Prevent stats update while adapter is being reset, or if the pci
3814 * connection is down.
3816 if (adapter->link_speed == 0)
3817 return;
3818 if (pci_channel_offline(pdev))
3819 return;
3821 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3822 adapter->stats.gprc += rd32(E1000_GPRC);
3823 adapter->stats.gorc += rd32(E1000_GORCL);
3824 rd32(E1000_GORCH); /* clear GORCL */
3825 adapter->stats.bprc += rd32(E1000_BPRC);
3826 adapter->stats.mprc += rd32(E1000_MPRC);
3827 adapter->stats.roc += rd32(E1000_ROC);
3829 adapter->stats.prc64 += rd32(E1000_PRC64);
3830 adapter->stats.prc127 += rd32(E1000_PRC127);
3831 adapter->stats.prc255 += rd32(E1000_PRC255);
3832 adapter->stats.prc511 += rd32(E1000_PRC511);
3833 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3834 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3835 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3836 adapter->stats.sec += rd32(E1000_SEC);
3838 adapter->stats.mpc += rd32(E1000_MPC);
3839 adapter->stats.scc += rd32(E1000_SCC);
3840 adapter->stats.ecol += rd32(E1000_ECOL);
3841 adapter->stats.mcc += rd32(E1000_MCC);
3842 adapter->stats.latecol += rd32(E1000_LATECOL);
3843 adapter->stats.dc += rd32(E1000_DC);
3844 adapter->stats.rlec += rd32(E1000_RLEC);
3845 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3846 adapter->stats.xontxc += rd32(E1000_XONTXC);
3847 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3848 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3849 adapter->stats.fcruc += rd32(E1000_FCRUC);
3850 adapter->stats.gptc += rd32(E1000_GPTC);
3851 adapter->stats.gotc += rd32(E1000_GOTCL);
3852 rd32(E1000_GOTCH); /* clear GOTCL */
3853 adapter->stats.rnbc += rd32(E1000_RNBC);
3854 adapter->stats.ruc += rd32(E1000_RUC);
3855 adapter->stats.rfc += rd32(E1000_RFC);
3856 adapter->stats.rjc += rd32(E1000_RJC);
3857 adapter->stats.tor += rd32(E1000_TORH);
3858 adapter->stats.tot += rd32(E1000_TOTH);
3859 adapter->stats.tpr += rd32(E1000_TPR);
3861 adapter->stats.ptc64 += rd32(E1000_PTC64);
3862 adapter->stats.ptc127 += rd32(E1000_PTC127);
3863 adapter->stats.ptc255 += rd32(E1000_PTC255);
3864 adapter->stats.ptc511 += rd32(E1000_PTC511);
3865 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3866 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3868 adapter->stats.mptc += rd32(E1000_MPTC);
3869 adapter->stats.bptc += rd32(E1000_BPTC);
3871 /* used for adaptive IFS */
3873 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3874 adapter->stats.tpt += hw->mac.tx_packet_delta;
3875 hw->mac.collision_delta = rd32(E1000_COLC);
3876 adapter->stats.colc += hw->mac.collision_delta;
3878 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3879 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3880 adapter->stats.tncrs += rd32(E1000_TNCRS);
3881 adapter->stats.tsctc += rd32(E1000_TSCTC);
3882 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3884 adapter->stats.iac += rd32(E1000_IAC);
3885 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3886 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3887 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3888 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3889 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3890 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3891 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3892 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3894 /* Fill out the OS statistics structure */
3895 netdev->stats.multicast = adapter->stats.mprc;
3896 netdev->stats.collisions = adapter->stats.colc;
3900 if (hw->mac.type != e1000_82575) {
3902 u64 rqdpc_total = 0;
3904 /* Read out drop stats per RX queue. Note that RQDPC (Receive
3905 * Queue Drop Packet Count) only gets incremented if the
3906 * DROP_EN bit is set (in the SRRCTL register for that
3907 * queue). If the DROP_EN bit is NOT set, then a somewhat
3908 * equivalent count is stored in RNBC (not on a per-queue basis).
3909 * Also note the drop count is due to a lack of available
3910 * descriptors.
3911 */
3912 for (i = 0; i < adapter->num_rx_queues; i++) {
3913 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3914 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3915 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3917 netdev->stats.rx_fifo_errors = rqdpc_total;
3920 /* Note RNBC (Receive No Buffers Count) is not an exact
3921 * drop count as the hardware FIFO might save the day. That's
3922 * one of the reasons for saving it in rx_fifo_errors, as it is
3923 * potentially not a true drop.
3925 netdev->stats.rx_fifo_errors += adapter->stats.rnbc;
3927 /* RLEC on some newer hardware can be incorrect so build
3928 * our own version based on RUC and ROC */
3929 netdev->stats.rx_errors = adapter->stats.rxerrc +
3930 adapter->stats.crcerrs + adapter->stats.algnerrc +
3931 adapter->stats.ruc + adapter->stats.roc +
3932 adapter->stats.cexterr;
3933 netdev->stats.rx_length_errors = adapter->stats.ruc +
3934 adapter->stats.roc;
3935 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3936 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3937 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3940 netdev->stats.tx_errors = adapter->stats.ecol +
3941 adapter->stats.latecol;
3942 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3943 netdev->stats.tx_window_errors = adapter->stats.latecol;
3944 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3946 /* Tx Dropped needs to be maintained elsewhere */
3949 if (hw->phy.media_type == e1000_media_type_copper) {
3950 if ((adapter->link_speed == SPEED_1000) &&
3951 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3952 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3953 adapter->phy_stats.idle_errors += phy_tmp;
3957 /* Management Stats */
3958 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3959 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3960 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3963 static irqreturn_t igb_msix_other(int irq, void *data)
3965 struct igb_adapter *adapter = data;
3966 struct e1000_hw *hw = &adapter->hw;
3967 u32 icr = rd32(E1000_ICR);
3968 /* reading ICR causes bit 31 of EICR to be cleared */
3970 if (icr & E1000_ICR_DOUTSYNC) {
3971 /* HW is reporting DMA is out of sync */
3972 adapter->stats.doosync++;
3975 /* Check for a mailbox event */
3976 if (icr & E1000_ICR_VMMB)
3977 igb_msg_task(adapter);
3979 if (icr & E1000_ICR_LSC) {
3980 hw->mac.get_link_status = 1;
3981 /* guard against interrupt when we're going down */
3982 if (!test_bit(__IGB_DOWN, &adapter->state))
3983 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3986 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
3987 wr32(E1000_EIMS, adapter->eims_other);
3992 static void igb_write_itr(struct igb_q_vector *q_vector)
3994 u32 itr_val = q_vector->itr_val & 0x7FFC;
3996 if (!q_vector->set_itr)
3997 return;
4002 if (q_vector->itr_shift)
4003 itr_val |= itr_val << q_vector->itr_shift;
4004 else
4005 itr_val |= 0x8000000;
4007 writel(itr_val, q_vector->itr_register);
4008 q_vector->set_itr = 0;
4011 static irqreturn_t igb_msix_ring(int irq, void *data)
4013 struct igb_q_vector *q_vector = data;
4015 /* Write the ITR value calculated from the previous interrupt. */
4016 igb_write_itr(q_vector);
4018 napi_schedule(&q_vector->napi);
4023 #ifdef CONFIG_IGB_DCA
4024 static void igb_update_dca(struct igb_q_vector *q_vector)
4026 struct igb_adapter *adapter = q_vector->adapter;
4027 struct e1000_hw *hw = &adapter->hw;
4028 int cpu = get_cpu();
4030 if (q_vector->cpu == cpu)
4031 goto out_no_update;
4033 if (q_vector->tx_ring) {
4034 int q = q_vector->tx_ring->reg_idx;
4035 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4036 if (hw->mac.type == e1000_82575) {
4037 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4038 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4040 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4041 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4042 E1000_DCA_TXCTRL_CPUID_SHIFT;
4044 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4045 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4047 if (q_vector->rx_ring) {
4048 int q = q_vector->rx_ring->reg_idx;
4049 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4050 if (hw->mac.type == e1000_82575) {
4051 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4052 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4054 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4055 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4056 E1000_DCA_RXCTRL_CPUID_SHIFT;
4058 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4059 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4060 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4061 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4063 q_vector->cpu = cpu;
4064 out_no_update:
4065 put_cpu();
4068 static void igb_setup_dca(struct igb_adapter *adapter)
4070 struct e1000_hw *hw = &adapter->hw;
4073 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
4074 return;
4076 /* Always use CB2 mode, difference is masked in the CB driver. */
4077 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4079 for (i = 0; i < adapter->num_q_vectors; i++) {
4080 struct igb_q_vector *q_vector = adapter->q_vector[i];
4082 igb_update_dca(q_vector);
4086 static int __igb_notify_dca(struct device *dev, void *data)
4088 struct net_device *netdev = dev_get_drvdata(dev);
4089 struct igb_adapter *adapter = netdev_priv(netdev);
4090 struct e1000_hw *hw = &adapter->hw;
4091 unsigned long event = *(unsigned long *)data;
4094 case DCA_PROVIDER_ADD:
4095 /* if already enabled, don't do it again */
4096 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
4098 /* Always use CB2 mode, difference is masked
4099 * in the CB driver. */
4100 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4101 if (dca_add_requester(dev) == 0) {
4102 adapter->flags |= IGB_FLAG_DCA_ENABLED;
4103 dev_info(&adapter->pdev->dev, "DCA enabled\n");
4104 igb_setup_dca(adapter);
4107 /* Fall Through since DCA is disabled. */
4108 case DCA_PROVIDER_REMOVE:
4109 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
4110 /* without this a class_device is left
4111 * hanging around in the sysfs model */
4112 dca_remove_requester(dev);
4113 dev_info(&adapter->pdev->dev, "DCA disabled\n");
4114 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
4115 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
4123 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4128 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4129 __igb_notify_dca);
4131 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4133 #endif /* CONFIG_IGB_DCA */
4135 static void igb_ping_all_vfs(struct igb_adapter *adapter)
4137 struct e1000_hw *hw = &adapter->hw;
4141 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4142 ping = E1000_PF_CONTROL_MSG;
4143 if (adapter->vf_data[i].clear_to_send)
4144 ping |= E1000_VT_MSGTYPE_CTS;
4145 igb_write_mbx(hw, &ping, 1, i);
4149 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4150 u32 *msgbuf, u32 vf)
4152 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4153 u16 *hash_list = (u16 *)&msgbuf[1];
4154 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4157 /* only up to 30 hash values supported */
4158 if (n > 30)
4159 n = 30;
4161 /* salt away the number of multicast addresses assigned
4162 * to this VF for later use to restore when the PF multicast
4163 * list is updated */
4165 vf_data->num_vf_mc_hashes = n;
4167 /* VFs are limited to using the MTA hash table for their multicast
4168 * addresses */
4169 for (i = 0; i < n; i++)
4170 vf_data->vf_mc_hashes[i] = hash_list[i];
4172 /* Flush and reset the mta with the new values */
4173 igb_set_rx_mode(adapter->netdev);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
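
/**
 * igb_vlvf_set - add or remove a pool from a VLAN filter entry
 * @adapter: board private structure
 * @vid: VLAN id to match
 * @add: true to add the pool to the filter, false to remove it
 * @vf: pool/VF index
 *
 * Searches the VLVF array for an enabled entry matching @vid (allocating
 * a free one on add), updates its pool-select bits, and grows or shrinks
 * the VF's receive packet length limit to account for the VLAN tag.
 **/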
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
					             adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			adapter->vf_data[vf].vlans_enabled++;

			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
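
/**
 * igb_vf_reset_msg - reply to a VF reset request
 * @adapter: board private structure
 * @vf: VF index being reset
 *
 * Clears the VF state, programs its MAC address into a RAR entry,
 * re-enables its transmit/receive queues and acks the reset with the
 * MAC address in the mailbox reply.
 **/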
static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}
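
/**
 * igb_msg_task - service pending VF mailbox events
 * @adapter: board private structure
 *
 * Polls every allocated VF for outstanding reset requests, messages
 * and acks, and dispatches each one to its handler.
 **/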
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
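
/**
 * igb_rcv_msg_from_vf - read and dispatch one VF mailbox message
 * @adapter: board private structure
 * @vf: VF index to read from
 *
 * A VF is NACKed until it completes a reset; afterwards its
 * configuration requests are dispatched and the result is reported
 * back with an ACK or NACK plus clear-to-send.
 **/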
static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
		        "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -E1000_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
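
/**
 * igb_ring_irq_enable - re-enable interrupts for a queue vector
 * @q_vector: vector to re-arm after polling
 *
 * Updates the adaptive ITR for the vector and, unless the interface is
 * going down, re-enables its MSI-X vector (or the shared interrupt).
 **/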
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 **/
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
			                          regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) &
		         E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&adapter->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(adapter->hw.hw_addr + tx_ring->head),
				readl(adapter->hw.hw_addr + tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
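
/**
 * igb_rx_checksum_adv - indicate hardware checksum status to the stack
 * @adapter: board private structure
 * @status_err: descriptor status/error bits
 * @skb: received packet
 *
 * Marks the skb CHECKSUM_UNNECESSARY when hardware validated the TCP/UDP
 * checksum, working around an 82576 erratum on short SCTP packets.
 **/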
static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((adapter->hw.mac.type == e1000_82576) &&
		      (skb->len == 60)))
			adapter->hw_csum_err++;
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
}
static inline u16 igb_get_hlen(struct igb_adapter *adapter,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	            E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > adapter->rx_buffer_len)
		hlen = adapter->rx_buffer_len;
	return hlen;
}
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc , *next_rxd;
	struct igb_buffer *buffer_info , *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 adapter->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (adapter->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(adapter, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;

			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
				skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
				timecompare_transform(&adapter->compare, ns);
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: ring to refill
 * @cleaned_count: number of descriptors to refill
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                                     int cleaned_count)
{
	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 *                                          (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0<<16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (1 /* UDP */
	      | (1<<15) /* VF not compared */
	      | (1<<27) /* Enable Timestamping */
	      | (7<<28) /* only source port filter enabled,
			   source/target address and protocol
			   masked */)
	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
				      enabled */));

	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
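
/*
 * A minimal user-space sketch of driving the ioctl above (illustrative
 * only; assumes an interface named "eth0" and an already-open AF_INET
 * socket fd, and omits error handling):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */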
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
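
/**
 * igb_vlan_rx_register - enable or disable VLAN offload
 * @netdev: network interface device structure
 * @grp: VLAN group from the 8021q layer, or NULL to disable
 *
 * Toggles VLAN tag insert/strip and receive filtering to match the
 * presence of a VLAN group, then restores the max packet size.
 **/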
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}
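
/**
 * igb_restore_vlan - re-program VLAN filters after a reset
 * @adapter: board private structure
 *
 * Replays the current VLAN group into hardware so that every VLAN id
 * registered before the reset is filtered again.
 **/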
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
		        "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
		        "igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
		        "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
		        "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 **/
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
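
/**
 * igb_rar_set_qsel - program a receive address register with pool select
 * @adapter: board private structure
 * @addr: MAC address to program
 * @index: RAR entry to use
 * @qsel: pool/queue the address should be steered to
 **/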
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and move
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
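
/**
 * igb_vmm_control - enable PF-side VMDq/SR-IOV plumbing
 * @adapter: board private structure
 *
 * Signals "PF reset done" so VF mailboxes become usable, then turns on
 * VMDq loopback and packet replication in hardware.
 **/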
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_data;

	if (!adapter->vfs_allocated_count)
		return;

	/* VFs need a PF reset indication before they
	 * can send/receive mail */
	reg_data = rd32(E1000_CTRL_EXT);
	reg_data |= E1000_CTRL_EXT_PFRSTD;
	wr32(E1000_CTRL_EXT, reg_data);

	igb_vmdq_set_loopback_pf(hw, true);
	igb_vmdq_set_replication_pf(hw, true);
}