1 /*******************************************************************************
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #include "ixgb.h"
31 /* Change Log
33 * - Make needlessly global code static -- bunk@stusta.de
34 * - ethtool cleanup -- shemminger@osdl.org
35 * - Support for MODULE_VERSION -- linville@tuxdriver.com
36 * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
38 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
39 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
41 * - reset buffer_info->dma in Tx resource cleanup logic
43 * - sparse cleanup - shemminger@osdl.org
44 * - fix tx resource cleanup logic
45 */
47 char ixgb_driver_name[] = "ixgb";
48 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
50 #ifndef CONFIG_IXGB_NAPI
51 #define DRIVERNAPI
52 #else
53 #define DRIVERNAPI "-NAPI"
54 #endif
55 #define DRV_VERSION "1.0.104-k4"DRIVERNAPI
56 char ixgb_driver_version[] = DRV_VERSION;
57 static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
59 /* ixgb_pci_tbl - PCI Device ID Table
61 * Wildcard entries (PCI_ANY_ID) should come last
62 * Last entry must be all 0s
64 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
65 * Class, Class Mask, private data (not used) }
66 */
67 static struct pci_device_id ixgb_pci_tbl[] = {
68 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
69 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
71 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
72 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
73 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
74 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
75 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
77 /* required last entry */
78 {0,}
79 };
81 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
83 /* Local Function Prototypes */
85 int ixgb_up(struct ixgb_adapter *adapter);
86 void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
87 void ixgb_reset(struct ixgb_adapter *adapter);
88 int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
89 int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
90 void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
91 void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
92 void ixgb_update_stats(struct ixgb_adapter *adapter);
94 static int ixgb_init_module(void);
95 static void ixgb_exit_module(void);
96 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
97 static void __devexit ixgb_remove(struct pci_dev *pdev);
98 static int ixgb_sw_init(struct ixgb_adapter *adapter);
99 static int ixgb_open(struct net_device *netdev);
100 static int ixgb_close(struct net_device *netdev);
101 static void ixgb_configure_tx(struct ixgb_adapter *adapter);
102 static void ixgb_configure_rx(struct ixgb_adapter *adapter);
103 static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
104 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
105 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
106 static void ixgb_set_multi(struct net_device *netdev);
107 static void ixgb_watchdog(unsigned long data);
108 static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
109 static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
110 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
111 static int ixgb_set_mac(struct net_device *netdev, void *p);
112 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
113 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
115 #ifdef CONFIG_IXGB_NAPI
116 static int ixgb_clean(struct net_device *netdev, int *budget);
117 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
118 int *work_done, int work_to_do);
119 #else
120 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
121 #endif
122 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
123 void ixgb_set_ethtool_ops(struct net_device *netdev);
124 static void ixgb_tx_timeout(struct net_device *dev);
125 static void ixgb_tx_timeout_task(struct net_device *dev);
126 static void ixgb_vlan_rx_register(struct net_device *netdev,
127 struct vlan_group *grp);
128 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
129 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
130 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
132 #ifdef CONFIG_NET_POLL_CONTROLLER
133 /* for netdump / net console */
134 static void ixgb_netpoll(struct net_device *dev);
135 #endif
137 /* Exported from other modules */
139 extern void ixgb_check_options(struct ixgb_adapter *adapter);
141 static struct pci_driver ixgb_driver = {
142 .name = ixgb_driver_name,
143 .id_table = ixgb_pci_tbl,
144 .probe = ixgb_probe,
145 .remove = __devexit_p(ixgb_remove),
146 };
148 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
149 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_VERSION);
153 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
154 static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
155 module_param(debug, int, 0);
156 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
158 /* some defines for controlling descriptor fetches in h/w */
159 #define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
160 #define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefetch below
161  * this */
162 #define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
163  * is pushed this many descriptors
164  * away from the head */
167 * ixgb_init_module - Driver Registration Routine
169 * ixgb_init_module is the first routine called when the driver is
170 * loaded. All it does is register with the PCI subsystem.
174 ixgb_init_module(void)
176 printk(KERN_INFO "%s - version %s\n",
177 ixgb_driver_string, ixgb_driver_version);
179 printk(KERN_INFO "%s\n", ixgb_copyright);
181 return pci_module_init(&ixgb_driver);
184 module_init(ixgb_init_module);
187 * ixgb_exit_module - Driver Exit Cleanup Routine
189 * ixgb_exit_module is called just before the driver is removed
194 ixgb_exit_module(void)
196 pci_unregister_driver(&ixgb_driver);
199 module_exit(ixgb_exit_module);
202 * ixgb_irq_disable - Mask off interrupt generation on the NIC
203 * @adapter: board private structure
207 ixgb_irq_disable(struct ixgb_adapter *adapter)
209 atomic_inc(&adapter->irq_sem);
210 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
211 IXGB_WRITE_FLUSH(&adapter->hw);
212 synchronize_irq(adapter->pdev->irq);
216 * ixgb_irq_enable - Enable default interrupt generation settings
217 * @adapter: board private structure
221 ixgb_irq_enable(struct ixgb_adapter *adapter)
223 if(atomic_dec_and_test(&adapter->irq_sem)) {
224 IXGB_WRITE_REG(&adapter->hw, IMS,
225 IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
226 IXGB_INT_LSC);
227 IXGB_WRITE_FLUSH(&adapter->hw);
232 ixgb_up(struct ixgb_adapter *adapter)
234 struct net_device *netdev = adapter->netdev;
236 int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
237 struct ixgb_hw *hw = &adapter->hw;
239 /* hardware has been reset, we need to reload some things */
241 ixgb_set_multi(netdev);
243 ixgb_restore_vlan(adapter);
245 ixgb_configure_tx(adapter);
246 ixgb_setup_rctl(adapter);
247 ixgb_configure_rx(adapter);
248 ixgb_alloc_rx_buffers(adapter);
250 /* disable interrupts and get the hardware into a known state */
251 IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
253 #ifdef CONFIG_PCI_MSI
255 boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
256 IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
257 adapter->have_msi = TRUE;
259 if (!pcix)
260 adapter->have_msi = FALSE;
261 else if((err = pci_enable_msi(adapter->pdev))) {
263 "Unable to allocate MSI interrupt Error: %d\n", err);
264 adapter->have_msi = FALSE;
265 /* proceed to try to request regular interrupt */
270 if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
271 SA_SHIRQ | SA_SAMPLE_RANDOM,
272 netdev->name, netdev))) {
273 DPRINTK(PROBE, ERR,
274 "Unable to allocate interrupt Error: %d\n", err);
275 return err;
276 }
278 if((hw->max_frame_size != max_frame) ||
279 (hw->max_frame_size !=
280 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
282 hw->max_frame_size = max_frame;
284 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
286 if(hw->max_frame_size >
287 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
288 uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
290 if(!(ctrl0 & IXGB_CTRL0_JFE)) {
291 ctrl0 |= IXGB_CTRL0_JFE;
292 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
297 mod_timer(&adapter->watchdog_timer, jiffies);
299 #ifdef CONFIG_IXGB_NAPI
300 netif_poll_enable(netdev);
301 #endif
302 ixgb_irq_enable(adapter);
308 ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
310 struct net_device *netdev = adapter->netdev;
312 ixgb_irq_disable(adapter);
313 free_irq(adapter->pdev->irq, netdev);
314 #ifdef CONFIG_PCI_MSI
315 if(adapter->have_msi == TRUE)
316 pci_disable_msi(adapter->pdev);
317 #endif
319 if(kill_watchdog)
320 del_timer_sync(&adapter->watchdog_timer);
321 #ifdef CONFIG_IXGB_NAPI
322 netif_poll_disable(netdev);
323 #endif
324 adapter->link_speed = 0;
325 adapter->link_duplex = 0;
326 netif_carrier_off(netdev);
327 netif_stop_queue(netdev);
329 ixgb_reset(adapter);
330 ixgb_clean_tx_ring(adapter);
331 ixgb_clean_rx_ring(adapter);
335 ixgb_reset(struct ixgb_adapter *adapter)
338 ixgb_adapter_stop(&adapter->hw);
339 if(!ixgb_init_hw(&adapter->hw))
340 DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
344 * ixgb_probe - Device Initialization Routine
345 * @pdev: PCI device information struct
346 * @ent: entry in ixgb_pci_tbl
348 * Returns 0 on success, negative on failure
350 * ixgb_probe initializes an adapter identified by a pci_dev structure.
351 * The OS initialization, configuring of the adapter private structure,
352 * and a hardware reset occur.
356 ixgb_probe(struct pci_dev *pdev,
357 const struct pci_device_id *ent)
359 struct net_device *netdev = NULL;
360 struct ixgb_adapter *adapter;
361 static int cards_found = 0;
362 unsigned long mmio_start;
368 if((err = pci_enable_device(pdev)))
371 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
372 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
373 pci_using_dac = 1;
374 } else {
375 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
376 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
377 printk(KERN_ERR
378 "ixgb: No usable DMA configuration, aborting\n");
379 goto err_dma_mask;
380 }
381 pci_using_dac = 0;
382 }
384 if((err = pci_request_regions(pdev, ixgb_driver_name)))
385 goto err_request_regions;
387 pci_set_master(pdev);
389 netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
390 if(!netdev) {
391 err = -ENOMEM;
392 goto err_alloc_etherdev;
393 }
395 SET_MODULE_OWNER(netdev);
396 SET_NETDEV_DEV(netdev, &pdev->dev);
398 pci_set_drvdata(pdev, netdev);
399 adapter = netdev_priv(netdev);
400 adapter->netdev = netdev;
401 adapter->pdev = pdev;
402 adapter->hw.back = adapter;
403 adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
405 mmio_start = pci_resource_start(pdev, BAR_0);
406 mmio_len = pci_resource_len(pdev, BAR_0);
408 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
409 if(!adapter->hw.hw_addr) {
414 for(i = BAR_1; i <= BAR_5; i++) {
415 if(pci_resource_len(pdev, i) == 0)
416 continue;
417 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
418 adapter->hw.io_base = pci_resource_start(pdev, i);
419 break;
420 }
421 }
423 netdev->open = &ixgb_open;
424 netdev->stop = &ixgb_close;
425 netdev->hard_start_xmit = &ixgb_xmit_frame;
426 netdev->get_stats = &ixgb_get_stats;
427 netdev->set_multicast_list = &ixgb_set_multi;
428 netdev->set_mac_address = &ixgb_set_mac;
429 netdev->change_mtu = &ixgb_change_mtu;
430 ixgb_set_ethtool_ops(netdev);
431 netdev->tx_timeout = &ixgb_tx_timeout;
432 netdev->watchdog_timeo = 5 * HZ;
433 #ifdef CONFIG_IXGB_NAPI
434 netdev->poll = &ixgb_clean;
435 netdev->weight = 64;
436 #endif
437 netdev->vlan_rx_register = ixgb_vlan_rx_register;
438 netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
439 netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
440 #ifdef CONFIG_NET_POLL_CONTROLLER
441 netdev->poll_controller = ixgb_netpoll;
442 #endif
444 strcpy(netdev->name, pci_name(pdev));
445 netdev->mem_start = mmio_start;
446 netdev->mem_end = mmio_start + mmio_len;
447 netdev->base_addr = adapter->hw.io_base;
449 adapter->bd_number = cards_found;
450 adapter->link_speed = 0;
451 adapter->link_duplex = 0;
453 /* setup the private structure */
455 if((err = ixgb_sw_init(adapter)))
458 netdev->features = NETIF_F_SG |
459 NETIF_F_HW_CSUM |
460 NETIF_F_HW_VLAN_TX |
461 NETIF_F_HW_VLAN_RX |
462 NETIF_F_HW_VLAN_FILTER;
463 #ifdef NETIF_F_TSO
464 netdev->features |= NETIF_F_TSO;
465 #endif
466 #ifdef NETIF_F_LLTX
467 netdev->features |= NETIF_F_LLTX;
468 #endif
470 if(pci_using_dac)
471 netdev->features |= NETIF_F_HIGHDMA;
473 /* make sure the EEPROM is good */
475 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
476 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
477 err = -EIO;
478 goto err_eeprom;
479 }
481 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
482 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
484 if(!is_valid_ether_addr(netdev->perm_addr)) {
485 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
486 err = -EIO;
487 goto err_eeprom;
488 }
490 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
492 init_timer(&adapter->watchdog_timer);
493 adapter->watchdog_timer.function = &ixgb_watchdog;
494 adapter->watchdog_timer.data = (unsigned long)adapter;
496 INIT_WORK(&adapter->tx_timeout_task,
497 (void (*)(void *))ixgb_tx_timeout_task, netdev);
499 strcpy(netdev->name, "eth%d");
500 if((err = register_netdev(netdev)))
503 /* we're going to reset, so assume we have no link for now */
505 netif_carrier_off(netdev);
506 netif_stop_queue(netdev);
508 DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
509 ixgb_check_options(adapter);
510 /* reset the hardware with the new settings */
511 ixgb_reset(adapter);
513 cards_found++;
514 return 0;
516 err_register:
517 err_sw_init:
518 err_eeprom:
520 iounmap(adapter->hw.hw_addr);
521 err_ioremap:
522 free_netdev(netdev);
523 err_alloc_etherdev:
524 pci_release_regions(pdev);
525 err_request_regions:
526 err_dma_mask:
527 pci_disable_device(pdev);
528 return err;
529 }
532 * ixgb_remove - Device Removal Routine
533 * @pdev: PCI device information struct
535 * ixgb_remove is called by the PCI subsystem to alert the driver
536 * that it should release a PCI device. The could be caused by a
537 * Hot-Plug event, or because the driver is going to be removed from
541 static void __devexit
542 ixgb_remove(struct pci_dev *pdev)
544 struct net_device *netdev = pci_get_drvdata(pdev);
545 struct ixgb_adapter *adapter = netdev_priv(netdev);
547 unregister_netdev(netdev);
549 iounmap(adapter->hw.hw_addr);
550 pci_release_regions(pdev);
552 free_netdev(netdev);
553 }
556 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
557 * @adapter: board private structure to initialize
559 * ixgb_sw_init initializes the Adapter private data structure.
560 * Fields are initialized based on PCI device information and
561 * OS network device settings (MTU size).
565 ixgb_sw_init(struct ixgb_adapter *adapter)
567 struct ixgb_hw *hw = &adapter->hw;
568 struct net_device *netdev = adapter->netdev;
569 struct pci_dev *pdev = adapter->pdev;
571 /* PCI config space info */
573 hw->vendor_id = pdev->vendor;
574 hw->device_id = pdev->device;
575 hw->subsystem_vendor_id = pdev->subsystem_vendor;
576 hw->subsystem_id = pdev->subsystem_device;
578 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
580 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
582 if((hw->device_id == IXGB_DEVICE_ID_82597EX)
583 || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
584 || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
585 || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
586 hw->mac_type = ixgb_82597;
587 else {
588 /* should never have loaded on this device */
589 DPRINTK(PROBE, ERR, "unsupported device id\n");
590 }
592 /* enable flow control to be programmed */
593 hw->fc.send_xon = 1;
595 atomic_set(&adapter->irq_sem, 1);
596 spin_lock_init(&adapter->tx_lock);
602 * ixgb_open - Called when a network interface is made active
603 * @netdev: network interface device structure
605 * Returns 0 on success, negative value on failure
607 * The open entry point is called when a network interface is made
608 * active by the system (IFF_UP). At this point all resources needed
609 * for transmit and receive operations are allocated, the interrupt
610 * handler is registered with the OS, the watchdog timer is started,
611 * and the stack is notified that the interface is ready.
615 ixgb_open(struct net_device *netdev)
617 struct ixgb_adapter *adapter = netdev_priv(netdev);
620 /* allocate transmit descriptors */
622 if((err = ixgb_setup_tx_resources(adapter)))
623 goto err_setup_tx;
625 /* allocate receive descriptors */
627 if((err = ixgb_setup_rx_resources(adapter)))
628 goto err_setup_rx;
630 if((err = ixgb_up(adapter)))
631 goto err_up;
633 return 0;
635 err_up:
636 ixgb_free_rx_resources(adapter);
637 err_setup_rx:
638 ixgb_free_tx_resources(adapter);
639 err_setup_tx:
640 return err;
641 }
646 * ixgb_close - Disables a network interface
647 * @netdev: network interface device structure
649 * Returns 0, this is not allowed to fail
651 * The close entry point is called when an interface is de-activated
652 * by the OS. The hardware is still under the drivers control, but
653 * needs to be disabled. A global MAC reset is issued to stop the
654 * hardware, and all transmit and receive resources are freed.
658 ixgb_close(struct net_device *netdev)
660 struct ixgb_adapter *adapter = netdev_priv(netdev);
662 ixgb_down(adapter, TRUE);
664 ixgb_free_tx_resources(adapter);
665 ixgb_free_rx_resources(adapter);
671 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
672 * @adapter: board private structure
674 * Return 0 on success, negative on failure
678 ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
680 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
681 struct pci_dev *pdev = adapter->pdev;
684 size = sizeof(struct ixgb_buffer) * txdr->count;
685 txdr->buffer_info = vmalloc(size);
686 if(!txdr->buffer_info) {
687 DPRINTK(PROBE, ERR,
688 "Unable to allocate transmit descriptor ring memory\n");
689 return -ENOMEM;
690 }
691 memset(txdr->buffer_info, 0, size);
693 /* round up to nearest 4K */
695 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
696 IXGB_ROUNDUP(txdr->size, 4096);
698 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
699 if(!txdr->desc) {
700 vfree(txdr->buffer_info);
701 DPRINTK(PROBE, ERR,
702 "Unable to allocate transmit descriptor memory\n");
703 return -ENOMEM;
704 }
705 memset(txdr->desc, 0, txdr->size);
707 txdr->next_to_use = 0;
708 txdr->next_to_clean = 0;
714 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
715 * @adapter: board private structure
717 * Configure the Tx unit of the MAC after a reset.
721 ixgb_configure_tx(struct ixgb_adapter *adapter)
723 uint64_t tdba = adapter->tx_ring.dma;
724 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
726 struct ixgb_hw *hw = &adapter->hw;
728 /* Setup the Base and Length of the Tx Descriptor Ring
729 * tx_ring.dma can be either a 32 or 64 bit value
730 */
732 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
733 IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
735 IXGB_WRITE_REG(hw, TDLEN, tdlen);
737 /* Setup the HW Tx Head and Tail descriptor pointers */
739 IXGB_WRITE_REG(hw, TDH, 0);
740 IXGB_WRITE_REG(hw, TDT, 0);
742 /* don't set up txdctl, it induces performance problems if configured
744 /* Set the Tx Interrupt Delay register */
746 IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
748 /* Program the Transmit Control Register */
750 tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
751 IXGB_WRITE_REG(hw, TCTL, tctl);
753 /* Setup Transmit Descriptor Settings for this adapter */
754 adapter->tx_cmd_type =
755 IXGB_TX_DESC_TYPE
756 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
757 }
760 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
761 * @adapter: board private structure
763 * Returns 0 on success, negative on failure
767 ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
769 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
770 struct pci_dev *pdev = adapter->pdev;
773 size = sizeof(struct ixgb_buffer) * rxdr->count;
774 rxdr->buffer_info = vmalloc(size);
775 if(!rxdr->buffer_info) {
776 DPRINTK(PROBE, ERR,
777 "Unable to allocate receive descriptor ring\n");
778 return -ENOMEM;
779 }
780 memset(rxdr->buffer_info, 0, size);
782 /* Round up to nearest 4K */
784 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
785 IXGB_ROUNDUP(rxdr->size, 4096);
787 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
789 if(!rxdr->desc) {
790 vfree(rxdr->buffer_info);
791 DPRINTK(PROBE, ERR,
792 "Unable to allocate receive descriptors\n");
793 return -ENOMEM;
794 }
795 memset(rxdr->desc, 0, rxdr->size);
797 rxdr->next_to_clean = 0;
798 rxdr->next_to_use = 0;
804 * ixgb_setup_rctl - configure the receive control register
805 * @adapter: Board private structure
809 ixgb_setup_rctl(struct ixgb_adapter *adapter)
813 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
815 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
817 rctl |=
818 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
819 IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
820 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
822 rctl |= IXGB_RCTL_SECRC;
824 switch (adapter->rx_buffer_len) {
825 case IXGB_RXBUFFER_2048:
826 default:
827 rctl |= IXGB_RCTL_BSIZE_2048;
828 break;
829 case IXGB_RXBUFFER_4096:
830 rctl |= IXGB_RCTL_BSIZE_4096;
831 break;
832 case IXGB_RXBUFFER_8192:
833 rctl |= IXGB_RCTL_BSIZE_8192;
834 break;
835 case IXGB_RXBUFFER_16384:
836 rctl |= IXGB_RCTL_BSIZE_16384;
837 break;
838 }
840 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
844 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
845 * @adapter: board private structure
847 * Configure the Rx unit of the MAC after a reset.
851 ixgb_configure_rx(struct ixgb_adapter *adapter)
853 uint64_t rdba = adapter->rx_ring.dma;
854 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
855 struct ixgb_hw *hw = &adapter->hw;
860 /* make sure receives are disabled while setting up the descriptors */
862 rctl = IXGB_READ_REG(hw, RCTL);
863 IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
865 /* set the Receive Delay Timer Register */
867 IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
869 /* Setup the Base and Length of the Rx Descriptor Ring */
871 IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
872 IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
874 IXGB_WRITE_REG(hw, RDLEN, rdlen);
876 /* Setup the HW Rx Head and Tail Descriptor Pointers */
877 IXGB_WRITE_REG(hw, RDH, 0);
878 IXGB_WRITE_REG(hw, RDT, 0);
880 /* set up pre-fetching of receive buffers so we get some before we
881 * run out (default hardware behavior is to run out before fetching
882 * more). This sets up to fetch if HTHRESH rx descriptors are avail
883 * and the descriptors in hw cache are below PTHRESH. This avoids
884 * the hardware behavior of fetching <=512 descriptors in a single
885 * burst that pre-empts all other activity, usually causing fifo
886 * overruns */
887 /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
888 rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
889 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
890 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
891 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
893 /* Enable Receive Checksum Offload for TCP and UDP */
894 if(adapter->rx_csum == TRUE) {
895 rxcsum = IXGB_READ_REG(hw, RXCSUM);
896 rxcsum |= IXGB_RXCSUM_TUOFL;
897 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
900 /* Enable Receives */
902 IXGB_WRITE_REG(hw, RCTL, rctl);
906 * ixgb_free_tx_resources - Free Tx Resources
907 * @adapter: board private structure
909 * Free all transmit software resources
913 ixgb_free_tx_resources(struct ixgb_adapter *adapter)
915 struct pci_dev *pdev = adapter->pdev;
917 ixgb_clean_tx_ring(adapter);
919 vfree(adapter->tx_ring.buffer_info);
920 adapter->tx_ring.buffer_info = NULL;
922 pci_free_consistent(pdev, adapter->tx_ring.size,
923 adapter->tx_ring.desc, adapter->tx_ring.dma);
925 adapter->tx_ring.desc = NULL;
929 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
930 struct ixgb_buffer *buffer_info)
932 struct pci_dev *pdev = adapter->pdev;
934 if (buffer_info->dma)
935 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
936 PCI_DMA_TODEVICE);
938 if (buffer_info->skb)
939 dev_kfree_skb_any(buffer_info->skb);
941 buffer_info->skb = NULL;
942 buffer_info->dma = 0;
943 buffer_info->time_stamp = 0;
944 /* these fields must always be initialized in tx
945 * buffer_info->length = 0;
946 * buffer_info->next_to_watch = 0; */
950 * ixgb_clean_tx_ring - Free Tx Buffers
951 * @adapter: board private structure
955 ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
957 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
958 struct ixgb_buffer *buffer_info;
962 /* Free all the Tx ring sk_buffs */
964 for(i = 0; i < tx_ring->count; i++) {
965 buffer_info = &tx_ring->buffer_info[i];
966 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
969 size = sizeof(struct ixgb_buffer) * tx_ring->count;
970 memset(tx_ring->buffer_info, 0, size);
972 /* Zero out the descriptor ring */
974 memset(tx_ring->desc, 0, tx_ring->size);
976 tx_ring->next_to_use = 0;
977 tx_ring->next_to_clean = 0;
979 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
980 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
984 * ixgb_free_rx_resources - Free Rx Resources
985 * @adapter: board private structure
987 * Free all receive software resources
991 ixgb_free_rx_resources(struct ixgb_adapter *adapter)
993 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
994 struct pci_dev *pdev = adapter->pdev;
996 ixgb_clean_rx_ring(adapter);
998 vfree(rx_ring->buffer_info);
999 rx_ring->buffer_info = NULL;
1001 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1003 rx_ring->desc = NULL;
1007 * ixgb_clean_rx_ring - Free Rx Buffers
1008 * @adapter: board private structure
1012 ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1014 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1015 struct ixgb_buffer *buffer_info;
1016 struct pci_dev *pdev = adapter->pdev;
1020 /* Free all the Rx ring sk_buffs */
1022 for(i = 0; i < rx_ring->count; i++) {
1023 buffer_info = &rx_ring->buffer_info[i];
1024 if(buffer_info->skb) {
1026 pci_unmap_single(pdev,
1027 buffer_info->dma,
1028 buffer_info->length,
1029 PCI_DMA_FROMDEVICE);
1031 dev_kfree_skb(buffer_info->skb);
1033 buffer_info->skb = NULL;
1037 size = sizeof(struct ixgb_buffer) * rx_ring->count;
1038 memset(rx_ring->buffer_info, 0, size);
1040 /* Zero out the descriptor ring */
1042 memset(rx_ring->desc, 0, rx_ring->size);
1044 rx_ring->next_to_clean = 0;
1045 rx_ring->next_to_use = 0;
1047 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1048 IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1052 * ixgb_set_mac - Change the Ethernet Address of the NIC
1053 * @netdev: network interface device structure
1054 * @p: pointer to an address structure
1056 * Returns 0 on success, negative on failure
1060 ixgb_set_mac(struct net_device *netdev, void *p)
1062 struct ixgb_adapter *adapter = netdev_priv(netdev);
1063 struct sockaddr *addr = p;
1065 if(!is_valid_ether_addr(addr->sa_data))
1066 return -EADDRNOTAVAIL;
1068 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1070 ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1076 * ixgb_set_multi - Multicast and Promiscuous mode set
1077 * @netdev: network interface device structure
1079 * The set_multi entry point is called whenever the multicast address
1080 * list or the network interface flags are updated. This routine is
1081 * responsible for configuring the hardware for proper multicast,
1082 * promiscuous mode, and all-multi behavior.
1086 ixgb_set_multi(struct net_device *netdev)
1088 struct ixgb_adapter *adapter = netdev_priv(netdev);
1089 struct ixgb_hw *hw = &adapter->hw;
1090 struct dev_mc_list *mc_ptr;
1094 /* Check for Promiscuous and All Multicast modes */
1096 rctl = IXGB_READ_REG(hw, RCTL);
1098 if(netdev->flags & IFF_PROMISC) {
1099 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1100 } else if(netdev->flags & IFF_ALLMULTI) {
1101 rctl |= IXGB_RCTL_MPE;
1102 rctl &= ~IXGB_RCTL_UPE;
1104 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1107 if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1108 rctl |= IXGB_RCTL_MPE;
1109 IXGB_WRITE_REG(hw, RCTL, rctl);
1110 } else {
1111 uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
1113 IXGB_WRITE_REG(hw, RCTL, rctl);
1115 for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
1116 i++, mc_ptr = mc_ptr->next)
1117 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1118 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1120 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1125 * ixgb_watchdog - Timer Call-back
1126 * @data: pointer to netdev cast into an unsigned long
1130 ixgb_watchdog(unsigned long data)
1132 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1133 struct net_device *netdev = adapter->netdev;
1134 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1136 ixgb_check_for_link(&adapter->hw);
1138 if (ixgb_check_for_bad_link(&adapter->hw)) {
1139 /* force the reset path */
1140 netif_stop_queue(netdev);
1141 }
1143 if(adapter->hw.link_up) {
1144 if(!netif_carrier_ok(netdev)) {
1145 DPRINTK(LINK, INFO,
1146 "NIC Link is Up 10000 Mbps Full Duplex\n");
1147 adapter->link_speed = 10000;
1148 adapter->link_duplex = FULL_DUPLEX;
1149 netif_carrier_on(netdev);
1150 netif_wake_queue(netdev);
1151 }
1152 } else {
1153 if(netif_carrier_ok(netdev)) {
1154 adapter->link_speed = 0;
1155 adapter->link_duplex = 0;
1156 DPRINTK(LINK, INFO, "NIC Link is Down\n");
1157 netif_carrier_off(netdev);
1158 netif_stop_queue(netdev);
1163 ixgb_update_stats(adapter);
1165 if(!netif_carrier_ok(netdev)) {
1166 if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1167 /* We've lost link, so the controller stops DMA,
1168 * but we've got queued Tx work that's never going
1169 * to get done, so reset controller to flush Tx.
1170 * (Do the reset outside of interrupt context). */
1171 schedule_work(&adapter->tx_timeout_task);
1175 /* Force detection of hung controller every watchdog period */
1176 adapter->detect_tx_hung = TRUE;
1178 /* generate an interrupt to force clean up of any stragglers */
1179 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1181 /* Reset the timer */
1182 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1185 #define IXGB_TX_FLAGS_CSUM 0x00000001
1186 #define IXGB_TX_FLAGS_VLAN 0x00000002
1187 #define IXGB_TX_FLAGS_TSO 0x00000004
1190 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1193 struct ixgb_context_desc *context_desc;
1195 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1196 uint16_t ipcse, tucse, mss;
1199 if(likely(skb_shinfo(skb)->tso_size)) {
1200 if (skb_header_cloned(skb)) {
1201 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1202 if (err)
1203 return err;
1204 }
1206 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
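/* hdr_len is the number of bytes the hardware replicates in front of every
 * TSO segment: the MAC + IP headers (skb->h.raw - skb->data) plus the TCP
 * header, whose length doff gives in 32-bit words (hence the << 2). */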
1207 mss = skb_shinfo(skb)->tso_size;
1208 skb->nh.iph->tot_len = 0;
1209 skb->nh.iph->check = 0;
1210 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1211 skb->nh.iph->daddr,
1212 0, IPPROTO_TCP, 0);
1213 ipcss = skb->nh.raw - skb->data;
1214 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1215 ipcse = skb->h.raw - skb->data - 1;
1216 tucss = skb->h.raw - skb->data;
1217 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1218 tucse = 0;
1220 i = adapter->tx_ring.next_to_use;
1221 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1223 context_desc->ipcss = ipcss;
1224 context_desc->ipcso = ipcso;
1225 context_desc->ipcse = cpu_to_le16(ipcse);
1226 context_desc->tucss = tucss;
1227 context_desc->tucso = tucso;
1228 context_desc->tucse = cpu_to_le16(tucse);
1229 context_desc->mss = cpu_to_le16(mss);
1230 context_desc->hdr_len = hdr_len;
1231 context_desc->status = 0;
1232 context_desc->cmd_type_len = cpu_to_le32(
1233 IXGB_CONTEXT_DESC_TYPE
1234 | IXGB_CONTEXT_DESC_CMD_TSE
1235 | IXGB_CONTEXT_DESC_CMD_IP
1236 | IXGB_CONTEXT_DESC_CMD_TCP
1237 | IXGB_CONTEXT_DESC_CMD_IDE
1238 | (skb->len - (hdr_len)));
1241 if(++i == adapter->tx_ring.count) i = 0;
1242 adapter->tx_ring.next_to_use = i;
1244 return 1;
1245 }
1247 return 0;
1248 }
1251 static inline boolean_t
1252 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1254 struct ixgb_context_desc *context_desc;
1258 if(likely(skb->ip_summed == CHECKSUM_HW)) {
1259 css = skb->h.raw - skb->data;
1260 cso = (skb->h.raw + skb->csum) - skb->data;
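/* For CHECKSUM_HW packets, skb->csum is the offset of the checksum field
 * within the transport header, so css marks where hardware checksumming
 * starts and cso marks where the computed checksum is written back. */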
1262 i = adapter->tx_ring.next_to_use;
1263 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1265 context_desc->tucss = css;
1266 context_desc->tucso = cso;
1267 context_desc->tucse = 0;
1268 /* zero out any previously existing data in one instruction */
1269 *(uint32_t *)&(context_desc->ipcss) = 0;
1270 context_desc->status = 0;
1271 context_desc->hdr_len = 0;
1272 context_desc->mss = 0;
1273 context_desc->cmd_type_len =
1274 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1275 | IXGB_TX_DESC_CMD_IDE);
1277 if(++i == adapter->tx_ring.count) i = 0;
1278 adapter->tx_ring.next_to_use = i;
1280 return TRUE;
1281 }
1283 return FALSE;
1284 }
1286 #define IXGB_MAX_TXD_PWR 14
1287 #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
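/* 1 << 14 == 16384: one transmit descriptor can address at most 16 KB of
 * data, which is the unit TXD_USE_COUNT() (further down) divides buffer
 * lengths into. */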
1290 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1291 unsigned int first)
1293 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1294 struct ixgb_buffer *buffer_info;
1296 unsigned int offset = 0, size, count = 0, i;
1297 unsigned int mss = skb_shinfo(skb)->tso_size;
1299 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1302 len -= skb->data_len;
1304 i = tx_ring->next_to_use;
1306 while(len) {
1307 buffer_info = &tx_ring->buffer_info[i];
1308 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1309 /* Workaround for premature desc write-backs
1310 * in TSO mode. Append 4-byte sentinel desc */
1311 if(unlikely(mss && !nr_frags && size == len && size > 8))
1312 size -= 4;
1314 buffer_info->length = size;
1315 buffer_info->dma =
1316 pci_map_single(adapter->pdev,
1317 skb->data + offset,
1318 size,
1319 PCI_DMA_TODEVICE);
1320 buffer_info->time_stamp = jiffies;
1321 buffer_info->next_to_watch = 0;
1323 len -= size;
1324 offset += size;
1325 count++;
1326 if(++i == tx_ring->count) i = 0;
1327 }
1329 for(f = 0; f < nr_frags; f++) {
1330 struct skb_frag_struct *frag;
1332 frag = &skb_shinfo(skb)->frags[f];
1333 len = frag->size;
1334 offset = 0;
1336 while(len) {
1337 buffer_info = &tx_ring->buffer_info[i];
1338 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1339 /* Workaround for premature desc write-backs
1340 * in TSO mode. Append 4-byte sentinel desc */
1341 if(unlikely(mss && (f == (nr_frags-1)) && (size == len)
1342 && (size > 8)))
1343 size -= 4;
1345 buffer_info->length = size;
1346 buffer_info->dma =
1347 pci_map_page(adapter->pdev,
1348 frag->page,
1349 frag->page_offset + offset,
1350 size,
1351 PCI_DMA_TODEVICE);
1352 buffer_info->time_stamp = jiffies;
1353 buffer_info->next_to_watch = 0;
1355 len -= size;
1356 offset += size;
1357 count++;
1358 if(++i == tx_ring->count) i = 0;
1359 }
1360 }
1361 i = (i == 0) ? tx_ring->count - 1 : i - 1;
1362 tx_ring->buffer_info[i].skb = skb;
1363 tx_ring->buffer_info[first].next_to_watch = i;
1365 return count;
1366 }
1369 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1371 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1372 struct ixgb_tx_desc *tx_desc = NULL;
1373 struct ixgb_buffer *buffer_info;
1374 uint32_t cmd_type_len = adapter->tx_cmd_type;
1375 uint8_t status = 0;
1376 uint8_t popts = 0;
1377 unsigned int i;
1379 if(tx_flags & IXGB_TX_FLAGS_TSO) {
1380 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1381 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1384 if(tx_flags & IXGB_TX_FLAGS_CSUM)
1385 popts |= IXGB_TX_DESC_POPTS_TXSM;
1387 if(tx_flags & IXGB_TX_FLAGS_VLAN) {
1388 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1389 }
1391 i = tx_ring->next_to_use;
1393 while(count--) {
1394 buffer_info = &tx_ring->buffer_info[i];
1395 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1396 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1397 tx_desc->cmd_type_len =
1398 cpu_to_le32(cmd_type_len | buffer_info->length);
1399 tx_desc->status = status;
1400 tx_desc->popts = popts;
1401 tx_desc->vlan = cpu_to_le16(vlan_id);
1403 if(++i == tx_ring->count) i = 0;
1404 }
1406 tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
1407 | IXGB_TX_DESC_CMD_RS );
1409 /* Force memory writes to complete before letting h/w
1410 * know there are new descriptors to fetch. (Only
1411 * applicable for weak-ordered memory model archs,
1412 * such as IA-64). */
1413 wmb();
1415 tx_ring->next_to_use = i;
1416 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1419 /* Tx Descriptors needed, worst case */
1420 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1421 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1422 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1423 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 \
1424 /* one more for TSO workaround */ + 1
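/* Worked example, assuming 4 KB pages (so MAX_SKB_FRAGS == 18):
 * TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) == 1 and TXD_USE_COUNT(PAGE_SIZE) == 1,
 * giving DESC_NEEDED == 1 + 18 * 1 + 1 + 1 == 21 descriptors reserved
 * before a frame is accepted for transmit. */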
1427 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1429 struct ixgb_adapter *adapter = netdev_priv(netdev);
1431 unsigned int tx_flags = 0;
1432 unsigned long flags;
1436 if(skb->len <= 0) {
1437 dev_kfree_skb_any(skb);
1438 return 0;
1439 }
1441 #ifdef NETIF_F_LLTX
1442 local_irq_save(flags);
1443 if (!spin_trylock(&adapter->tx_lock)) {
1444 /* Collision - tell upper layer to requeue */
1445 local_irq_restore(flags);
1446 return NETDEV_TX_LOCKED;
1447 }
1448 #else
1449 spin_lock_irqsave(&adapter->tx_lock, flags);
1450 #endif
1452 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
1453 netif_stop_queue(netdev);
1454 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1455 return NETDEV_TX_BUSY;
1458 #ifndef NETIF_F_LLTX
1459 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1460 #endif
1462 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1463 tx_flags |= IXGB_TX_FLAGS_VLAN;
1464 vlan_id = vlan_tx_tag_get(skb);
1467 first = adapter->tx_ring.next_to_use;
1469 tso = ixgb_tso(adapter, skb);
1470 if (tso < 0) {
1471 dev_kfree_skb_any(skb);
1472 #ifdef NETIF_F_LLTX
1473 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1474 #endif
1475 return NETDEV_TX_OK;
1476 }
1478 if (likely(tso))
1479 tx_flags |= IXGB_TX_FLAGS_TSO;
1480 else if(ixgb_tx_csum(adapter, skb))
1481 tx_flags |= IXGB_TX_FLAGS_CSUM;
1483 ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
1484 tx_flags);
1486 netdev->trans_start = jiffies;
1488 #ifdef NETIF_F_LLTX
1489 /* Make sure there is space in the ring for the next send. */
1490 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
1491 netif_stop_queue(netdev);
1493 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1495 #endif
1496 return NETDEV_TX_OK;
1500 * ixgb_tx_timeout - Respond to a Tx Hang
1501 * @netdev: network interface device structure
1505 ixgb_tx_timeout(struct net_device *netdev)
1507 struct ixgb_adapter *adapter = netdev_priv(netdev);
1509 /* Do the reset outside of interrupt context */
1510 schedule_work(&adapter->tx_timeout_task);
1514 ixgb_tx_timeout_task(struct net_device *netdev)
1516 struct ixgb_adapter *adapter = netdev_priv(netdev);
1518 adapter->tx_timeout_count++;
1519 ixgb_down(adapter, TRUE);
1520 ixgb_up(adapter);
1521 }
1524 * ixgb_get_stats - Get System Network Statistics
1525 * @netdev: network interface device structure
1527 * Returns the address of the device statistics structure.
1528 * The statistics are actually updated from the timer callback.
1531 static struct net_device_stats *
1532 ixgb_get_stats(struct net_device *netdev)
1534 struct ixgb_adapter *adapter = netdev_priv(netdev);
1536 return &adapter->net_stats;
1540 * ixgb_change_mtu - Change the Maximum Transfer Unit
1541 * @netdev: network interface device structure
1542 * @new_mtu: new value for maximum frame size
1544 * Returns 0 on success, negative on failure
1548 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1550 struct ixgb_adapter *adapter = netdev_priv(netdev);
1551 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1552 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1555 if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1556 || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1557 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
1558 return -EINVAL;
1559 }
1561 if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1562 || (max_frame <= IXGB_RXBUFFER_2048)) {
1563 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
1565 } else if(max_frame <= IXGB_RXBUFFER_4096) {
1566 adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
1568 } else if(max_frame <= IXGB_RXBUFFER_8192) {
1569 adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
1572 adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
1575 netdev->mtu = new_mtu;
1577 if(old_max_frame != max_frame && netif_running(netdev)) {
1579 ixgb_down(adapter, TRUE);
1580 ixgb_up(adapter);
1581 }
1583 return 0;
1584 }
1587 * ixgb_update_stats - Update the board statistics counters.
1588 * @adapter: board private structure
1592 ixgb_update_stats(struct ixgb_adapter *adapter)
1594 struct net_device *netdev = adapter->netdev;
1596 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1597 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1598 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1599 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1600 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1601 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1603 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1604 /* fix up multicast stats by removing broadcasts */
1605 multi -= bcast;
1608 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1609 adapter->stats.mprch += (multi >> 32);
1610 adapter->stats.bprcl += bcast_l;
1611 adapter->stats.bprch += bcast_h;
1612 } else {
1613 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1614 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1615 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1616 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1617 }
1618 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1619 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1620 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1621 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1622 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1623 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1624 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1625 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1626 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1627 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1628 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1629 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1630 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1631 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1632 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1633 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1634 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1635 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1636 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1637 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1638 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1639 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1640 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1641 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1642 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1643 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1644 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1645 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1646 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1647 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1648 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1649 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1650 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1651 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1652 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1653 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1654 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1655 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1656 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1657 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1658 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1659 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1660 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1661 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1662 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1663 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1664 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1665 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1666 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1667 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1668 adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1669 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1670 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1671 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1672 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1673 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1675 /* Fill out the OS statistics structure */
1677 adapter->net_stats.rx_packets = adapter->stats.gprcl;
1678 adapter->net_stats.tx_packets = adapter->stats.gptcl;
1679 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
1680 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
1681 adapter->net_stats.multicast = adapter->stats.mprcl;
1682 adapter->net_stats.collisions = 0;
1684 /* ignore RLEC as it reports errors for padded (<64bytes) frames
1685 * with a length in the type/len field */
1686 adapter->net_stats.rx_errors =
1687 /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1688 adapter->stats.ruc +
1689 adapter->stats.roc /*+ adapter->stats.rlec */ +
1690 adapter->stats.icbc +
1691 adapter->stats.ecbc + adapter->stats.mpc;
1693 /* see above
1694 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
1695 */
1697 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
1698 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
1699 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
1700 adapter->net_stats.rx_over_errors = adapter->stats.mpc;
1702 adapter->net_stats.tx_errors = 0;
1703 adapter->net_stats.rx_frame_errors = 0;
1704 adapter->net_stats.tx_aborted_errors = 0;
1705 adapter->net_stats.tx_carrier_errors = 0;
1706 adapter->net_stats.tx_fifo_errors = 0;
1707 adapter->net_stats.tx_heartbeat_errors = 0;
1708 adapter->net_stats.tx_window_errors = 0;
1711 #define IXGB_MAX_INTR 10
1713 * ixgb_intr - Interrupt Handler
1714 * @irq: interrupt number
1715 * @data: pointer to a network interface device structure
1716 * @pt_regs: CPU registers structure
1720 ixgb_intr(int irq, void *data, struct pt_regs *regs)
1722 struct net_device *netdev = data;
1723 struct ixgb_adapter *adapter = netdev_priv(netdev);
1724 struct ixgb_hw *hw = &adapter->hw;
1725 uint32_t icr = IXGB_READ_REG(hw, ICR);
1726 #ifndef CONFIG_IXGB_NAPI
1727 unsigned int i;
1728 #endif
1730 if(unlikely(!icr))
1731 return IRQ_NONE; /* Not our interrupt */
1733 if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
1734 mod_timer(&adapter->watchdog_timer, jiffies);
1737 #ifdef CONFIG_IXGB_NAPI
1738 if(netif_rx_schedule_prep(netdev)) {
1740 /* Disable interrupts and register for poll. The flush
1741 of the posted write is intentionally left out.
1744 atomic_inc(&adapter->irq_sem);
1745 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1746 __netif_rx_schedule(netdev);
1747 }
1748 #else
1749 /* yes, that is actually a & and it is meant to make sure that
1750 * every pass through this for loop checks both receive and
1751 * transmit queues for completed descriptors, intended to
1752 * avoid starvation issues and assist tx/rx fairness. */
1753 for(i = 0; i < IXGB_MAX_INTR; i++)
1754 if(!ixgb_clean_rx_irq(adapter) &
1755 !ixgb_clean_tx_irq(adapter))
1756 break;
1757 #endif
1759 return IRQ_HANDLED;
1760 }
1761 #ifdef CONFIG_IXGB_NAPI
1763 * ixgb_clean - NAPI Rx polling callback
1764 * @adapter: board private structure
1768 ixgb_clean(struct net_device *netdev, int *budget)
1770 struct ixgb_adapter *adapter = netdev_priv(netdev);
1771 int work_to_do = min(*budget, netdev->quota);
1772 int tx_cleaned;
1773 int work_done = 0;
1775 tx_cleaned = ixgb_clean_tx_irq(adapter);
1776 ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
1778 *budget -= work_done;
1779 netdev->quota -= work_done;
1781 /* if no Tx and not enough Rx work done, exit the polling mode */
1782 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1783 netif_rx_complete(netdev);
1784 ixgb_irq_enable(adapter);
1785 return 0;
1786 }
1788 return 1;
1789 }
1790 #endif
1793 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1794 * @adapter: board private structure
1798 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1800 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1801 struct net_device *netdev = adapter->netdev;
1802 struct ixgb_tx_desc *tx_desc, *eop_desc;
1803 struct ixgb_buffer *buffer_info;
1804 unsigned int i, eop;
1805 boolean_t cleaned = FALSE;
1807 i = tx_ring->next_to_clean;
1808 eop = tx_ring->buffer_info[i].next_to_watch;
1809 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1811 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1813 for(cleaned = FALSE; !cleaned; ) {
1814 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1815 buffer_info = &tx_ring->buffer_info[i];
1817 if (tx_desc->popts
1818 & (IXGB_TX_DESC_POPTS_TXSM |
1819 IXGB_TX_DESC_POPTS_IXSM))
1820 adapter->hw_csum_tx_good++;
1822 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1824 *(uint32_t *)&(tx_desc->status) = 0;
1826 cleaned = (i == eop);
1827 if(++i == tx_ring->count) i = 0;
1830 eop = tx_ring->buffer_info[i].next_to_watch;
1831 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1834 tx_ring->next_to_clean = i;
1836 spin_lock(&adapter->tx_lock);
1837 if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
1838 (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
1840 netif_wake_queue(netdev);
1841 }
1842 spin_unlock(&adapter->tx_lock);
1844 if(adapter->detect_tx_hung) {
1845 /* detect a transmit hang in hardware, this serializes the
1846 * check with the clearing of time_stamp and movement of i */
1847 adapter->detect_tx_hung = FALSE;
1848 if (tx_ring->buffer_info[eop].dma &&
1849 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1850 && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1851 IXGB_STATUS_TXOFF)) {
1852 /* detected Tx unit hang */
1853 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
1856 " next_to_use <%x>\n"
1857 " next_to_clean <%x>\n"
1858 "buffer_info[next_to_clean]\n"
1859 " time_stamp <%lx>\n"
1860 " next_to_watch <%x>\n"
1862 " next_to_watch.status <%x>\n",
1863 IXGB_READ_REG(&adapter->hw, TDH),
1864 IXGB_READ_REG(&adapter->hw, TDT),
1865 tx_ring->next_to_use,
1866 tx_ring->next_to_clean,
1867 tx_ring->buffer_info[eop].time_stamp,
1871 netif_stop_queue(netdev);
1872 }
1873 }
1875 return cleaned;
1876 }
1879 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1880 * @adapter: board private structure
1881 * @rx_desc: receive descriptor
1882 * @sk_buff: socket buffer with received data
1886 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1887 struct ixgb_rx_desc *rx_desc,
1888 struct sk_buff *skb)
1890 /* Ignore Checksum bit is set OR
1891 * TCP Checksum has not been calculated
1893 if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1894 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1895 skb->ip_summed = CHECKSUM_NONE;
1896 return;
1897 }
1899 /* At this point we know the hardware did the TCP checksum */
1900 /* now look at the TCP checksum error bit */
1901 if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1902 /* let the stack verify checksum errors */
1903 skb->ip_summed = CHECKSUM_NONE;
1904 adapter->hw_csum_rx_error++;
1905 } else {
1906 /* TCP checksum is good */
1907 skb->ip_summed = CHECKSUM_UNNECESSARY;
1908 adapter->hw_csum_rx_good++;
1913 * ixgb_clean_rx_irq - Send received data up the network stack,
1914 * @adapter: board private structure
1918 #ifdef CONFIG_IXGB_NAPI
1919 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1920 #else
1921 ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1922 #endif
1923 {
1924 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1925 struct net_device *netdev = adapter->netdev;
1926 struct pci_dev *pdev = adapter->pdev;
1927 struct ixgb_rx_desc *rx_desc, *next_rxd;
1928 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1931 boolean_t cleaned = FALSE;
1933 i = rx_ring->next_to_clean;
1934 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1935 buffer_info = &rx_ring->buffer_info[i];
1937 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1938 struct sk_buff *skb, *next_skb;
1941 #ifdef CONFIG_IXGB_NAPI
1942 if(*work_done >= work_to_do)
1943 break;
1945 (*work_done)++;
1946 #endif
1947 status = rx_desc->status;
1948 skb = buffer_info->skb;
1949 buffer_info->skb = NULL;
1951 prefetch(skb->data);
1953 if(++i == rx_ring->count) i = 0;
1954 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1957 if((j = i + 1) == rx_ring->count) j = 0;
1958 next2_buffer = &rx_ring->buffer_info[j];
1959 prefetch(next2_buffer);
1961 next_buffer = &rx_ring->buffer_info[i];
1962 next_skb = next_buffer->skb;
1963 prefetch(next_skb);
1965 cleaned = TRUE;
1967 pci_unmap_single(pdev,
1968 buffer_info->dma,
1969 buffer_info->length,
1970 PCI_DMA_FROMDEVICE);
1972 length = le16_to_cpu(rx_desc->length);
1974 if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1976 /* All receives must fit into a single buffer */
1978 IXGB_DBG("Receive packet consumed multiple buffers "
1979 "length<%x>\n", length);
1981 dev_kfree_skb_irq(skb);
1982 goto rxdesc_done;
1983 }
1985 if (unlikely(rx_desc->errors
1986 & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
1987 | IXGB_RX_DESC_ERRORS_P |
1988 IXGB_RX_DESC_ERRORS_RXE))) {
1990 dev_kfree_skb_irq(skb);
1991 goto rxdesc_done;
1992 }
1994 /* code added for copybreak, this should improve
1995 * performance for small packets with large amounts
1996 * of reassembly being done in the stack */
1997 #define IXGB_CB_LENGTH 256
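/* Copying a short (< 256 byte) packet into a freshly allocated, right-sized
 * skb lets the original rx_buffer_len-sized buffer stay in buffer_info and
 * be recycled by ixgb_alloc_rx_buffers() instead of being reallocated. */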
1998 if (length < IXGB_CB_LENGTH) {
1999 struct sk_buff *new_skb =
2000 dev_alloc_skb(length + NET_IP_ALIGN);
2001 if (new_skb) {
2002 skb_reserve(new_skb, NET_IP_ALIGN);
2003 new_skb->dev = netdev;
2004 memcpy(new_skb->data - NET_IP_ALIGN,
2005 skb->data - NET_IP_ALIGN,
2006 length + NET_IP_ALIGN);
2007 /* save the skb in buffer_info as good */
2008 buffer_info->skb = skb;
2009 skb = new_skb;
2010 }
2011 }
2012 /* end copybreak code */
2015 skb_put(skb, length);
2017 /* Receive Checksum Offload */
2018 ixgb_rx_checksum(adapter, rx_desc, skb);
2020 skb->protocol = eth_type_trans(skb, netdev);
2021 #ifdef CONFIG_IXGB_NAPI
2022 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2023 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2024 le16_to_cpu(rx_desc->special) &
2025 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
2026 } else {
2027 netif_receive_skb(skb);
2028 }
2029 #else /* CONFIG_IXGB_NAPI */
2030 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2031 vlan_hwaccel_rx(skb, adapter->vlgrp,
2032 le16_to_cpu(rx_desc->special) &
2033 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
2034 } else {
2035 netif_rx(skb);
2036 }
2037 #endif /* CONFIG_IXGB_NAPI */
2038 netdev->last_rx = jiffies;
2040 rxdesc_done:
2041 /* clean up descriptor, might be written over by hw */
2042 rx_desc->status = 0;
2044 /* use prefetched values */
2045 rx_desc = next_rxd;
2046 buffer_info = next_buffer;
2047 }
2049 rx_ring->next_to_clean = i;
2051 ixgb_alloc_rx_buffers(adapter);
2053 return cleaned;
2054 }
2057 * ixgb_alloc_rx_buffers - Replace used receive buffers
2058 * @adapter: address of board private structure
2062 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2064 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2065 struct net_device *netdev = adapter->netdev;
2066 struct pci_dev *pdev = adapter->pdev;
2067 struct ixgb_rx_desc *rx_desc;
2068 struct ixgb_buffer *buffer_info;
2069 struct sk_buff *skb;
2071 int num_group_tail_writes;
2074 i = rx_ring->next_to_use;
2075 buffer_info = &rx_ring->buffer_info[i];
2076 cleancount = IXGB_DESC_UNUSED(rx_ring);
2078 num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
2080 /* leave three descriptors unused */
2081 while(--cleancount > 2) {
2082 /* recycle! its good for you */
2083 if (!(skb = buffer_info->skb))
2084 skb = dev_alloc_skb(adapter->rx_buffer_len
2085 + NET_IP_ALIGN);
2086 else {
2087 skb_trim(skb, 0);
2088 goto map_skb;
2089 }
2091 if (unlikely(!skb)) {
2092 /* Better luck next round */
2093 adapter->alloc_rx_buff_failed++;
2094 break;
2095 }
2097 /* Make buffer alignment 2 beyond a 16 byte boundary
2098 * this will result in a 16 byte aligned IP header after
2099 * the 14 byte MAC header is removed
2100 */
2101 skb_reserve(skb, NET_IP_ALIGN);
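/* With NET_IP_ALIGN at its usual value of 2, the 14-byte Ethernet header
 * ends at offset 16 into the buffer, so the IP header that follows it
 * starts on a 16-byte boundary. */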
2104 map_skb:
2105 buffer_info->skb = skb;
2106 buffer_info->length = adapter->rx_buffer_len;
2108 buffer_info->dma = pci_map_single(pdev,
2109 skb->data,
2110 adapter->rx_buffer_len,
2111 PCI_DMA_FROMDEVICE);
2113 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2114 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2115 /* guarantee DD bit not set now before h/w gets descriptor
2116 * this is the rest of the workaround for h/w double
2117 * writebacks. */
2118 rx_desc->status = 0;
2121 if(++i == rx_ring->count) i = 0;
2122 buffer_info = &rx_ring->buffer_info[i];
2125 if (likely(rx_ring->next_to_use != i)) {
2126 rx_ring->next_to_use = i;
2127 if (unlikely(i-- == 0))
2128 i = (rx_ring->count - 1);
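/* Back i up by one (wrapping at zero) so the tail register below is
 * written with the last descriptor actually initialized rather than
 * with next_to_use itself. */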
2130 /* Force memory writes to complete before letting h/w
2131 * know there are new descriptors to fetch. (Only
2132 * applicable for weak-ordered memory model archs, such
2133 * as IA-64). */
2134 wmb();
2135 IXGB_WRITE_REG(&adapter->hw, RDT, i);
2140 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2142 * @param netdev network interface device structure
2143 * @param grp indicates to enable or disable tagging/stripping
2146 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2148 struct ixgb_adapter *adapter = netdev_priv(netdev);
2149 uint32_t ctrl, rctl;
2151 ixgb_irq_disable(adapter);
2152 adapter->vlgrp = grp;
2154 if(grp) {
2155 /* enable VLAN tag insert/strip */
2156 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2157 ctrl |= IXGB_CTRL0_VME;
2158 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2160 /* enable VLAN receive filtering */
2162 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2163 rctl |= IXGB_RCTL_VFE;
2164 rctl &= ~IXGB_RCTL_CFIEN;
2165 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2166 } else {
2167 /* disable VLAN tag insert/strip */
2169 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2170 ctrl &= ~IXGB_CTRL0_VME;
2171 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2173 /* disable VLAN filtering */
2175 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2176 rctl &= ~IXGB_RCTL_VFE;
2177 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2180 ixgb_irq_enable(adapter);
2184 ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2186 struct ixgb_adapter *adapter = netdev_priv(netdev);
2187 uint32_t vfta, index;
2189 /* add VID to filter table */
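/* The VFTA is a 4096-bit table held in 128 32-bit registers: bits 11:5
 * of the VLAN ID select the register, bits 4:0 select the bit within it. */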
2191 index = (vid >> 5) & 0x7F;
2192 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2193 vfta |= (1 << (vid & 0x1F));
2194 ixgb_write_vfta(&adapter->hw, index, vfta);
2198 ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2200 struct ixgb_adapter *adapter = netdev_priv(netdev);
2201 uint32_t vfta, index;
2203 ixgb_irq_disable(adapter);
2205 if(adapter->vlgrp)
2206 adapter->vlgrp->vlan_devices[vid] = NULL;
2208 ixgb_irq_enable(adapter);
2210 /* remove VID from filter table*/
2212 index = (vid >> 5) & 0x7F;
2213 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2214 vfta &= ~(1 << (vid & 0x1F));
2215 ixgb_write_vfta(&adapter->hw, index, vfta);
2219 ixgb_restore_vlan(struct ixgb_adapter *adapter)
2221 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2223 if(adapter->vlgrp) {
2224 uint16_t vid;
2225 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2226 if(!adapter->vlgrp->vlan_devices[vid])
2227 continue;
2228 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2233 #ifdef CONFIG_NET_POLL_CONTROLLER
2235 * Polling 'interrupt' - used by things like netconsole to send skbs
2236 * without having to re-enable interrupts. It's not called while
2237 * the interrupt routine is executing.
2240 static void ixgb_netpoll(struct net_device *dev)
2242 struct ixgb_adapter *adapter = dev->priv;
2244 disable_irq(adapter->pdev->irq);
2245 ixgb_intr(adapter->pdev->irq, dev, NULL);
2246 enable_irq(adapter->pdev->irq);
2247 }
2248 #endif
2250 /* ixgb_main.c */