/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/aer.h>

#include "e1000.h"
#define DRV_VERSION "0.3.3.4-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
};
/**
 * e1000e_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->adapter->netdev->name;
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
162 struct net_device *netdev = adapter->netdev;
163 struct pci_dev *pdev = adapter->pdev;
164 struct e1000_ring *rx_ring = adapter->rx_ring;
165 struct e1000_rx_desc *rx_desc;
166 struct e1000_buffer *buffer_info;
169 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
171 i = rx_ring->next_to_use;
172 buffer_info = &rx_ring->buffer_info[i];
174 while (cleaned_count--) {
175 skb = buffer_info->skb;
181 skb = netdev_alloc_skb(netdev, bufsz);
183 /* Better luck next round */
184 adapter->alloc_rx_buff_failed++;
189 * Make buffer alignment 2 beyond a 16 byte boundary
190 * this will result in a 16 byte aligned IP header after
191 * the 14 byte MAC header is removed
193 skb_reserve(skb, NET_IP_ALIGN);
195 buffer_info->skb = skb;
197 buffer_info->dma = pci_map_single(pdev, skb->data,
198 adapter->rx_buffer_len,
200 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
201 dev_err(&pdev->dev, "RX DMA map failed\n");
202 adapter->rx_dma_failed++;
206 rx_desc = E1000_RX_DESC(*rx_ring, i);
207 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
210 if (i == rx_ring->count)
212 buffer_info = &rx_ring->buffer_info[i];
215 if (rx_ring->next_to_use != i) {
216 rx_ring->next_to_use = i;
218 i = (rx_ring->count - 1);
221 * Force memory writes to complete before letting h/w
222 * know there are new descriptors to fetch. (Only
223 * applicable for weak-ordered memory model archs,
227 writel(i, adapter->hw.hw_addr + rx_ring->tail);
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
238 struct net_device *netdev = adapter->netdev;
239 struct pci_dev *pdev = adapter->pdev;
240 union e1000_rx_desc_packet_split *rx_desc;
241 struct e1000_ring *rx_ring = adapter->rx_ring;
242 struct e1000_buffer *buffer_info;
243 struct e1000_ps_page *ps_page;
247 i = rx_ring->next_to_use;
248 buffer_info = &rx_ring->buffer_info[i];
250 while (cleaned_count--) {
251 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
253 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
254 ps_page = &buffer_info->ps_pages[j];
255 if (j >= adapter->rx_ps_pages) {
256 /* all unused desc entries get hw null ptr */
257 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
260 if (!ps_page->page) {
261 ps_page->page = alloc_page(GFP_ATOMIC);
262 if (!ps_page->page) {
263 adapter->alloc_rx_buff_failed++;
266 ps_page->dma = pci_map_page(pdev,
270 if (pci_dma_mapping_error(pdev, ps_page->dma)) {
271 dev_err(&adapter->pdev->dev,
272 "RX DMA page map failed\n");
273 adapter->rx_dma_failed++;
278 * Refresh the desc even if buffer_addrs
279 * didn't change because each write-back
282 rx_desc->read.buffer_addr[j+1] =
283 cpu_to_le64(ps_page->dma);
286 skb = netdev_alloc_skb(netdev,
287 adapter->rx_ps_bsize0 + NET_IP_ALIGN);
290 adapter->alloc_rx_buff_failed++;
295 * Make buffer alignment 2 beyond a 16 byte boundary
296 * this will result in a 16 byte aligned IP header after
297 * the 14 byte MAC header is removed
299 skb_reserve(skb, NET_IP_ALIGN);
301 buffer_info->skb = skb;
302 buffer_info->dma = pci_map_single(pdev, skb->data,
303 adapter->rx_ps_bsize0,
305 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
306 dev_err(&pdev->dev, "RX DMA map failed\n");
307 adapter->rx_dma_failed++;
309 dev_kfree_skb_any(skb);
310 buffer_info->skb = NULL;
314 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
317 if (i == rx_ring->count)
319 buffer_info = &rx_ring->buffer_info[i];
323 if (rx_ring->next_to_use != i) {
324 rx_ring->next_to_use = i;
327 i = (rx_ring->count - 1);
330 * Force memory writes to complete before letting h/w
331 * know there are new descriptors to fetch. (Only
332 * applicable for weak-ordered memory model archs,
337 * Hardware increments by 16 bytes, but packet split
338 * descriptors are 32 bytes...so we increment tail
341 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
354 struct net_device *netdev = adapter->netdev;
355 struct pci_dev *pdev = adapter->pdev;
356 struct e1000_rx_desc *rx_desc;
357 struct e1000_ring *rx_ring = adapter->rx_ring;
358 struct e1000_buffer *buffer_info;
361 unsigned int bufsz = 256 -
362 16 /* for skb_reserve */ -
365 i = rx_ring->next_to_use;
366 buffer_info = &rx_ring->buffer_info[i];
368 while (cleaned_count--) {
369 skb = buffer_info->skb;
375 skb = netdev_alloc_skb(netdev, bufsz);
376 if (unlikely(!skb)) {
377 /* Better luck next round */
378 adapter->alloc_rx_buff_failed++;
382 /* Make buffer alignment 2 beyond a 16 byte boundary
383 * this will result in a 16 byte aligned IP header after
384 * the 14 byte MAC header is removed
386 skb_reserve(skb, NET_IP_ALIGN);
388 buffer_info->skb = skb;
390 /* allocate a new page if necessary */
391 if (!buffer_info->page) {
392 buffer_info->page = alloc_page(GFP_ATOMIC);
393 if (unlikely(!buffer_info->page)) {
394 adapter->alloc_rx_buff_failed++;
399 if (!buffer_info->dma)
400 buffer_info->dma = pci_map_page(pdev,
401 buffer_info->page, 0,
405 rx_desc = E1000_RX_DESC(*rx_ring, i);
406 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
408 if (unlikely(++i == rx_ring->count))
410 buffer_info = &rx_ring->buffer_info[i];
413 if (likely(rx_ring->next_to_use != i)) {
414 rx_ring->next_to_use = i;
415 if (unlikely(i-- == 0))
416 i = (rx_ring->count - 1);
418 /* Force memory writes to complete before letting h/w
419 * know there are new descriptors to fetch. (Only
420 * applicable for weak-ordered memory model archs,
423 writel(i, adapter->hw.hw_addr + rx_ring->tail);
428 * e1000_clean_rx_irq - Send received data up the network stack; legacy
429 * @adapter: board private structure
431 * the return value indicates whether actual cleaning was done, there
432 * is no guarantee that everything was cleaned
434 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
435 int *work_done, int work_to_do)
437 struct net_device *netdev = adapter->netdev;
438 struct pci_dev *pdev = adapter->pdev;
439 struct e1000_ring *rx_ring = adapter->rx_ring;
440 struct e1000_rx_desc *rx_desc, *next_rxd;
441 struct e1000_buffer *buffer_info, *next_buffer;
444 int cleaned_count = 0;
446 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
448 i = rx_ring->next_to_clean;
449 rx_desc = E1000_RX_DESC(*rx_ring, i);
450 buffer_info = &rx_ring->buffer_info[i];
452 while (rx_desc->status & E1000_RXD_STAT_DD) {
456 if (*work_done >= work_to_do)
460 status = rx_desc->status;
461 skb = buffer_info->skb;
462 buffer_info->skb = NULL;
464 prefetch(skb->data - NET_IP_ALIGN);
467 if (i == rx_ring->count)
469 next_rxd = E1000_RX_DESC(*rx_ring, i);
472 next_buffer = &rx_ring->buffer_info[i];
476 pci_unmap_single(pdev,
478 adapter->rx_buffer_len,
480 buffer_info->dma = 0;
482 length = le16_to_cpu(rx_desc->length);
484 /* !EOP means multiple descriptors were used to store a single
485 * packet, also make sure the frame isn't just CRC only */
486 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
487 /* All receives must fit into a single buffer */
488 e_dbg("%s: Receive packet consumed multiple buffers\n",
491 buffer_info->skb = skb;
495 if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
497 buffer_info->skb = skb;
501 /* adjust length to remove Ethernet CRC */
502 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
505 total_rx_bytes += length;
509 * code added for copybreak, this should improve
510 * performance for small packets with large amounts
511 * of reassembly being done in the stack
513 if (length < copybreak) {
514 struct sk_buff *new_skb =
515 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
517 skb_reserve(new_skb, NET_IP_ALIGN);
518 skb_copy_to_linear_data_offset(new_skb,
524 /* save the skb in buffer_info as good */
525 buffer_info->skb = skb;
528 /* else just continue with the old one */
530 /* end copybreak code */
531 skb_put(skb, length);
533 /* Receive Checksum Offload */
534 e1000_rx_checksum(adapter,
536 ((u32)(rx_desc->errors) << 24),
537 le16_to_cpu(rx_desc->csum), skb);
		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
544 /* return some buffers to hardware, one at a time is too slow */
545 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
546 adapter->alloc_rx_buf(adapter, cleaned_count);
550 /* use prefetched values */
552 buffer_info = next_buffer;
554 rx_ring->next_to_clean = i;
556 cleaned_count = e1000_desc_unused(rx_ring);
558 adapter->alloc_rx_buf(adapter, cleaned_count);
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
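/*
 * e1000_put_txbuf - unmap a completed transmit buffer, free its skb (if any)
 * and clear the bookkeeping fields so the ring slot can be reused.
 */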
567 static void e1000_put_txbuf(struct e1000_adapter *adapter,
568 struct e1000_buffer *buffer_info)
570 buffer_info->dma = 0;
571 if (buffer_info->skb) {
572 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
574 dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
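/*
 * e1000_print_tx_hang - dump Tx ring and descriptor state to the log when
 * e1000_clean_tx_irq() suspects a transmit unit hang.
 */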
580 static void e1000_print_tx_hang(struct e1000_adapter *adapter)
582 struct e1000_ring *tx_ring = adapter->tx_ring;
583 unsigned int i = tx_ring->next_to_clean;
584 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
585 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
587 /* detected Tx unit hang */
588 e_err("Detected Tx Unit Hang:\n"
591 " next_to_use <%x>\n"
592 " next_to_clean <%x>\n"
593 "buffer_info[next_to_clean]:\n"
594 " time_stamp <%lx>\n"
595 " next_to_watch <%x>\n"
597 " next_to_watch.status <%x>\n",
598 readl(adapter->hw.hw_addr + tx_ring->head),
599 readl(adapter->hw.hw_addr + tx_ring->tail),
600 tx_ring->next_to_use,
601 tx_ring->next_to_clean,
602 tx_ring->buffer_info[eop].time_stamp,
605 eop_desc->upper.fields.status);
609 * e1000_clean_tx_irq - Reclaim resources after transmit completes
610 * @adapter: board private structure
612 * the return value indicates whether actual cleaning was done, there
613 * is no guarantee that everything was cleaned
615 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
617 struct net_device *netdev = adapter->netdev;
618 struct e1000_hw *hw = &adapter->hw;
619 struct e1000_ring *tx_ring = adapter->tx_ring;
620 struct e1000_tx_desc *tx_desc, *eop_desc;
621 struct e1000_buffer *buffer_info;
623 unsigned int count = 0;
625 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
627 i = tx_ring->next_to_clean;
628 eop = tx_ring->buffer_info[i].next_to_watch;
629 eop_desc = E1000_TX_DESC(*tx_ring, eop);
631 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
632 (count < tx_ring->count)) {
633 for (cleaned = 0; !cleaned; count++) {
634 tx_desc = E1000_TX_DESC(*tx_ring, i);
635 buffer_info = &tx_ring->buffer_info[i];
636 cleaned = (i == eop);
639 struct sk_buff *skb = buffer_info->skb;
640 unsigned int segs, bytecount;
641 segs = skb_shinfo(skb)->gso_segs ?: 1;
642 /* multiply data chunks by size of headers */
643 bytecount = ((segs - 1) * skb_headlen(skb)) +
645 total_tx_packets += segs;
646 total_tx_bytes += bytecount;
649 e1000_put_txbuf(adapter, buffer_info);
650 tx_desc->upper.data = 0;
653 if (i == tx_ring->count)
657 eop = tx_ring->buffer_info[i].next_to_watch;
658 eop_desc = E1000_TX_DESC(*tx_ring, eop);
661 tx_ring->next_to_clean = i;
663 #define TX_WAKE_THRESHOLD 32
664 if (cleaned && netif_carrier_ok(netdev) &&
665 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
666 /* Make sure that anybody stopping the queue after this
667 * sees the new next_to_clean.
671 if (netif_queue_stopped(netdev) &&
672 !(test_bit(__E1000_DOWN, &adapter->state))) {
673 netif_wake_queue(netdev);
674 ++adapter->restart_queue;
678 if (adapter->detect_tx_hung) {
679 /* Detect a transmit hang in hardware, this serializes the
680 * check with the clearing of time_stamp and movement of i */
681 adapter->detect_tx_hung = 0;
682 if (tx_ring->buffer_info[i].time_stamp &&
683 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
684 + (adapter->tx_timeout_factor * HZ))
685 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
686 e1000_print_tx_hang(adapter);
687 netif_stop_queue(netdev);
690 adapter->total_tx_bytes += total_tx_bytes;
691 adapter->total_tx_packets += total_tx_packets;
692 adapter->net_stats.tx_bytes += total_tx_bytes;
693 adapter->net_stats.tx_packets += total_tx_packets;
694 return (count < tx_ring->count);
698 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
699 * @adapter: board private structure
701 * the return value indicates whether actual cleaning was done, there
702 * is no guarantee that everything was cleaned
704 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
705 int *work_done, int work_to_do)
707 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
708 struct net_device *netdev = adapter->netdev;
709 struct pci_dev *pdev = adapter->pdev;
710 struct e1000_ring *rx_ring = adapter->rx_ring;
711 struct e1000_buffer *buffer_info, *next_buffer;
712 struct e1000_ps_page *ps_page;
716 int cleaned_count = 0;
718 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
720 i = rx_ring->next_to_clean;
721 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
722 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
723 buffer_info = &rx_ring->buffer_info[i];
725 while (staterr & E1000_RXD_STAT_DD) {
726 if (*work_done >= work_to_do)
729 skb = buffer_info->skb;
731 /* in the packet split case this is header only */
732 prefetch(skb->data - NET_IP_ALIGN);
735 if (i == rx_ring->count)
737 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
740 next_buffer = &rx_ring->buffer_info[i];
744 pci_unmap_single(pdev, buffer_info->dma,
745 adapter->rx_ps_bsize0,
747 buffer_info->dma = 0;
749 if (!(staterr & E1000_RXD_STAT_EOP)) {
750 e_dbg("%s: Packet Split buffers didn't pick up the "
751 "full packet\n", netdev->name);
752 dev_kfree_skb_irq(skb);
756 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
757 dev_kfree_skb_irq(skb);
761 length = le16_to_cpu(rx_desc->wb.middle.length0);
764 e_dbg("%s: Last part of the packet spanning multiple "
765 "descriptors\n", netdev->name);
766 dev_kfree_skb_irq(skb);
771 skb_put(skb, length);
775 * this looks ugly, but it seems compiler issues make it
776 * more efficient than reusing j
778 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
781 * page alloc/put takes too long and effects small packet
782 * throughput, so unsplit small packets and save the alloc/put
783 * only valid in softirq (napi) context to call kmap_*
785 if (l1 && (l1 <= copybreak) &&
786 ((length + l1) <= adapter->rx_ps_bsize0)) {
789 ps_page = &buffer_info->ps_pages[0];
792 * there is no documentation about how to call
793 * kmap_atomic, so we can't hold the mapping
796 pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
797 PAGE_SIZE, PCI_DMA_FROMDEVICE);
798 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
799 memcpy(skb_tail_pointer(skb), vaddr, l1);
800 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
801 pci_dma_sync_single_for_device(pdev, ps_page->dma,
802 PAGE_SIZE, PCI_DMA_FROMDEVICE);
805 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
813 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
814 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
818 ps_page = &buffer_info->ps_pages[j];
819 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
822 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
823 ps_page->page = NULL;
825 skb->data_len += length;
826 skb->truesize += length;
829 /* strip the ethernet crc, problem is we're using pages now so
830 * this whole operation can get a little cpu intensive
832 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
833 pskb_trim(skb, skb->len - 4);
836 total_rx_bytes += skb->len;
839 e1000_rx_checksum(adapter, staterr, le16_to_cpu(
840 rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
842 if (rx_desc->wb.upper.header_status &
843 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
844 adapter->rx_hdr_split++;
846 e1000_receive_skb(adapter, netdev, skb,
847 staterr, rx_desc->wb.middle.vlan);
850 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
851 buffer_info->skb = NULL;
853 /* return some buffers to hardware, one at a time is too slow */
854 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
855 adapter->alloc_rx_buf(adapter, cleaned_count);
859 /* use prefetched values */
861 buffer_info = next_buffer;
863 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
865 rx_ring->next_to_clean = i;
867 cleaned_count = e1000_desc_unused(rx_ring);
869 adapter->alloc_rx_buf(adapter, cleaned_count);
871 adapter->total_rx_bytes += total_rx_bytes;
872 adapter->total_rx_packets += total_rx_packets;
873 adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}
891 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
892 * @adapter: board private structure
894 * the return value indicates whether actual cleaning was done, there
895 * is no guarantee that everything was cleaned
898 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
899 int *work_done, int work_to_do)
901 struct net_device *netdev = adapter->netdev;
902 struct pci_dev *pdev = adapter->pdev;
903 struct e1000_ring *rx_ring = adapter->rx_ring;
904 struct e1000_rx_desc *rx_desc, *next_rxd;
905 struct e1000_buffer *buffer_info, *next_buffer;
908 int cleaned_count = 0;
909 bool cleaned = false;
910 unsigned int total_rx_bytes=0, total_rx_packets=0;
912 i = rx_ring->next_to_clean;
913 rx_desc = E1000_RX_DESC(*rx_ring, i);
914 buffer_info = &rx_ring->buffer_info[i];
916 while (rx_desc->status & E1000_RXD_STAT_DD) {
920 if (*work_done >= work_to_do)
924 status = rx_desc->status;
925 skb = buffer_info->skb;
926 buffer_info->skb = NULL;
929 if (i == rx_ring->count)
931 next_rxd = E1000_RX_DESC(*rx_ring, i);
934 next_buffer = &rx_ring->buffer_info[i];
938 pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
940 buffer_info->dma = 0;
942 length = le16_to_cpu(rx_desc->length);
944 /* errors is only valid for DD + EOP descriptors */
945 if (unlikely((status & E1000_RXD_STAT_EOP) &&
946 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
947 /* recycle both page and skb */
948 buffer_info->skb = skb;
949 /* an error means any chain goes out the window
951 if (rx_ring->rx_skb_top)
952 dev_kfree_skb(rx_ring->rx_skb_top);
953 rx_ring->rx_skb_top = NULL;
957 #define rxtop rx_ring->rx_skb_top
958 if (!(status & E1000_RXD_STAT_EOP)) {
959 /* this descriptor is only the beginning (or middle) */
961 /* this is the beginning of a chain */
963 skb_fill_page_desc(rxtop, 0, buffer_info->page,
966 /* this is the middle of a chain */
967 skb_fill_page_desc(rxtop,
968 skb_shinfo(rxtop)->nr_frags,
969 buffer_info->page, 0, length);
970 /* re-use the skb, only consumed the page */
971 buffer_info->skb = skb;
973 e1000_consume_page(buffer_info, rxtop, length);
977 /* end of the chain */
978 skb_fill_page_desc(rxtop,
979 skb_shinfo(rxtop)->nr_frags,
980 buffer_info->page, 0, length);
981 /* re-use the current skb, we only consumed the
983 buffer_info->skb = skb;
986 e1000_consume_page(buffer_info, skb, length);
988 /* no chain, got EOP, this buf is the packet
989 * copybreak to save the put_page/alloc_page */
990 if (length <= copybreak &&
991 skb_tailroom(skb) >= length) {
993 vaddr = kmap_atomic(buffer_info->page,
994 KM_SKB_DATA_SOFTIRQ);
995 memcpy(skb_tail_pointer(skb), vaddr,
998 KM_SKB_DATA_SOFTIRQ);
999 /* re-use the page, so don't erase
1000 * buffer_info->page */
1001 skb_put(skb, length);
1003 skb_fill_page_desc(skb, 0,
1004 buffer_info->page, 0,
1006 e1000_consume_page(buffer_info, skb,
1012 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1013 e1000_rx_checksum(adapter,
1015 ((u32)(rx_desc->errors) << 24),
1016 le16_to_cpu(rx_desc->csum), skb);
1018 /* probably a little skewed due to removing CRC */
1019 total_rx_bytes += skb->len;
1022 /* eth type trans needs skb->data to point to something */
1023 if (!pskb_may_pull(skb, ETH_HLEN)) {
1024 e_err("pskb_may_pull failed.\n");
1029 e1000_receive_skb(adapter, netdev, skb, status,
1033 rx_desc->status = 0;
1035 /* return some buffers to hardware, one at a time is too slow */
1036 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1037 adapter->alloc_rx_buf(adapter, cleaned_count);
1041 /* use prefetched values */
1043 buffer_info = next_buffer;
1045 rx_ring->next_to_clean = i;
1047 cleaned_count = e1000_desc_unused(rx_ring);
1049 adapter->alloc_rx_buf(adapter, cleaned_count);
1051 adapter->total_rx_bytes += total_rx_bytes;
1052 adapter->total_rx_packets += total_rx_packets;
1053 adapter->net_stats.rx_bytes += total_rx_bytes;
1054 adapter->net_stats.rx_packets += total_rx_packets;
1059 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1060 * @adapter: board private structure
1062 static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1064 struct e1000_ring *rx_ring = adapter->rx_ring;
1065 struct e1000_buffer *buffer_info;
1066 struct e1000_ps_page *ps_page;
1067 struct pci_dev *pdev = adapter->pdev;
1070 /* Free all the Rx ring sk_buffs */
1071 for (i = 0; i < rx_ring->count; i++) {
1072 buffer_info = &rx_ring->buffer_info[i];
1073 if (buffer_info->dma) {
1074 if (adapter->clean_rx == e1000_clean_rx_irq)
1075 pci_unmap_single(pdev, buffer_info->dma,
1076 adapter->rx_buffer_len,
1077 PCI_DMA_FROMDEVICE);
1078 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1079 pci_unmap_page(pdev, buffer_info->dma,
1081 PCI_DMA_FROMDEVICE);
1082 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1083 pci_unmap_single(pdev, buffer_info->dma,
1084 adapter->rx_ps_bsize0,
1085 PCI_DMA_FROMDEVICE);
1086 buffer_info->dma = 0;
1089 if (buffer_info->page) {
1090 put_page(buffer_info->page);
1091 buffer_info->page = NULL;
1094 if (buffer_info->skb) {
1095 dev_kfree_skb(buffer_info->skb);
1096 buffer_info->skb = NULL;
1099 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1100 ps_page = &buffer_info->ps_pages[j];
1103 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
1104 PCI_DMA_FROMDEVICE);
1106 put_page(ps_page->page);
1107 ps_page->page = NULL;
1111 /* there also may be some cached data from a chained receive */
1112 if (rx_ring->rx_skb_top) {
1113 dev_kfree_skb(rx_ring->rx_skb_top);
1114 rx_ring->rx_skb_top = NULL;
1117 /* Zero out the descriptor ring */
1118 memset(rx_ring->desc, 0, rx_ring->size);
1120 rx_ring->next_to_clean = 0;
1121 rx_ring->next_to_use = 0;
1123 writel(0, adapter->hw.hw_addr + rx_ring->head);
1124 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1127 static void e1000e_downshift_workaround(struct work_struct *work)
1129 struct e1000_adapter *adapter = container_of(work,
1130 struct e1000_adapter, downshift_task);
1132 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1136 * e1000_intr_msi - Interrupt Handler
1137 * @irq: interrupt number
1138 * @data: pointer to a network interface device structure
1140 static irqreturn_t e1000_intr_msi(int irq, void *data)
1142 struct net_device *netdev = data;
1143 struct e1000_adapter *adapter = netdev_priv(netdev);
1144 struct e1000_hw *hw = &adapter->hw;
1145 u32 icr = er32(ICR);
1148 * read ICR disables interrupts using IAM
1151 if (icr & E1000_ICR_LSC) {
1152 hw->mac.get_link_status = 1;
1154 * ICH8 workaround-- Call gig speed drop workaround on cable
1155 * disconnect (LSC) before accessing any PHY registers
1157 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1158 (!(er32(STATUS) & E1000_STATUS_LU)))
1159 schedule_work(&adapter->downshift_task);
1162 * 80003ES2LAN workaround-- For packet buffer work-around on
1163 * link down event; disable receives here in the ISR and reset
1164 * adapter in watchdog
1166 if (netif_carrier_ok(netdev) &&
1167 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1168 /* disable receives */
1169 u32 rctl = er32(RCTL);
1170 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1171 adapter->flags |= FLAG_RX_RESTART_NOW;
1173 /* guard against interrupt when we're going down */
1174 if (!test_bit(__E1000_DOWN, &adapter->state))
1175 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1178 if (napi_schedule_prep(&adapter->napi)) {
1179 adapter->total_tx_bytes = 0;
1180 adapter->total_tx_packets = 0;
1181 adapter->total_rx_bytes = 0;
1182 adapter->total_rx_packets = 0;
1183 __napi_schedule(&adapter->napi);
1190 * e1000_intr - Interrupt Handler
1191 * @irq: interrupt number
1192 * @data: pointer to a network interface device structure
1194 static irqreturn_t e1000_intr(int irq, void *data)
1196 struct net_device *netdev = data;
1197 struct e1000_adapter *adapter = netdev_priv(netdev);
1198 struct e1000_hw *hw = &adapter->hw;
1199 u32 rctl, icr = er32(ICR);
1202 return IRQ_NONE; /* Not our interrupt */
1205 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1206 * not set, then the adapter didn't send an interrupt
1208 if (!(icr & E1000_ICR_INT_ASSERTED))
1212 * Interrupt Auto-Mask...upon reading ICR,
1213 * interrupts are masked. No need for the
1217 if (icr & E1000_ICR_LSC) {
1218 hw->mac.get_link_status = 1;
1220 * ICH8 workaround-- Call gig speed drop workaround on cable
1221 * disconnect (LSC) before accessing any PHY registers
1223 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1224 (!(er32(STATUS) & E1000_STATUS_LU)))
1225 schedule_work(&adapter->downshift_task);
1228 * 80003ES2LAN workaround--
1229 * For packet buffer work-around on link down event;
1230 * disable receives here in the ISR and
1231 * reset adapter in watchdog
1233 if (netif_carrier_ok(netdev) &&
1234 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1235 /* disable receives */
1237 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1238 adapter->flags |= FLAG_RX_RESTART_NOW;
1240 /* guard against interrupt when we're going down */
1241 if (!test_bit(__E1000_DOWN, &adapter->state))
1242 mod_timer(&adapter->watchdog_timer, jiffies + 1);
	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
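/**
 * e1000_msix_other - MSI-X handler for the "other causes" vector (link
 * status changes and other non-queue interrupts)
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/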
1256 static irqreturn_t e1000_msix_other(int irq, void *data)
1258 struct net_device *netdev = data;
1259 struct e1000_adapter *adapter = netdev_priv(netdev);
1260 struct e1000_hw *hw = &adapter->hw;
1261 u32 icr = er32(ICR);
1263 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1264 ew32(IMS, E1000_IMS_OTHER);
1268 if (icr & adapter->eiac_mask)
1269 ew32(ICS, (icr & adapter->eiac_mask));
1271 if (icr & E1000_ICR_OTHER) {
1272 if (!(icr & E1000_ICR_LSC))
1273 goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
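/**
 * e1000_intr_msix_tx - MSI-X handler for the Tx queue vector
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/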
1287 static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1289 struct net_device *netdev = data;
1290 struct e1000_adapter *adapter = netdev_priv(netdev);
1291 struct e1000_hw *hw = &adapter->hw;
1292 struct e1000_ring *tx_ring = adapter->tx_ring;
1295 adapter->total_tx_bytes = 0;
1296 adapter->total_tx_packets = 0;
	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
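/**
 * e1000_intr_msix_rx - MSI-X handler for the Rx queue vector
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/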
1305 static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1307 struct net_device *netdev = data;
1308 struct e1000_adapter *adapter = netdev_priv(netdev);
1310 /* Write the ITR value calculated at the end of the
1311 * previous interrupt.
1313 if (adapter->rx_ring->set_itr) {
1314 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1315 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1316 adapter->rx_ring->set_itr = 0;
1319 if (napi_schedule_prep(&adapter->napi)) {
1320 adapter->total_rx_bytes = 0;
1321 adapter->total_rx_packets = 0;
1322 __napi_schedule(&adapter->napi);
1328 * e1000_configure_msix - Configure MSI-X hardware
1330 * e1000_configure_msix sets up the hardware to properly
1331 * generate MSI-X interrupts.
1333 static void e1000_configure_msix(struct e1000_adapter *adapter)
1335 struct e1000_hw *hw = &adapter->hw;
1336 struct e1000_ring *rx_ring = adapter->rx_ring;
1337 struct e1000_ring *tx_ring = adapter->tx_ring;
1339 u32 ctrl_ext, ivar = 0;
1341 adapter->eiac_mask = 0;
1343 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1344 if (hw->mac.type == e1000_82574) {
1345 u32 rfctl = er32(RFCTL);
1346 rfctl |= E1000_RFCTL_ACK_DIS;
1350 #define E1000_IVAR_INT_ALLOC_VALID 0x8
1351 /* Configure Rx vector */
1352 rx_ring->ims_val = E1000_IMS_RXQ0;
1353 adapter->eiac_mask |= rx_ring->ims_val;
1354 if (rx_ring->itr_val)
1355 writel(1000000000 / (rx_ring->itr_val * 256),
1356 hw->hw_addr + rx_ring->itr_register);
1358 writel(1, hw->hw_addr + rx_ring->itr_register);
1359 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1361 /* Configure Tx vector */
1362 tx_ring->ims_val = E1000_IMS_TXQ0;
1364 if (tx_ring->itr_val)
1365 writel(1000000000 / (tx_ring->itr_val * 256),
1366 hw->hw_addr + tx_ring->itr_register);
1368 writel(1, hw->hw_addr + tx_ring->itr_register);
1369 adapter->eiac_mask |= tx_ring->ims_val;
1370 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1372 /* set vector for Other Causes, e.g. link changes */
1374 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1375 if (rx_ring->itr_val)
1376 writel(1000000000 / (rx_ring->itr_val * 256),
1377 hw->hw_addr + E1000_EITR_82574(vector));
1379 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1381 /* Cause Tx interrupts on every write back */
1386 /* enable MSI-X PBA support */
1387 ctrl_ext = er32(CTRL_EXT);
1388 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1390 /* Auto-Mask Other interrupts upon ICR read */
1391 #define E1000_EIAC_MASK_82574 0x01F00000
1392 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1393 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1394 ew32(CTRL_EXT, ctrl_ext);
1398 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1400 if (adapter->msix_entries) {
1401 pci_disable_msix(adapter->pdev);
1402 kfree(adapter->msix_entries);
1403 adapter->msix_entries = NULL;
1404 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1405 pci_disable_msi(adapter->pdev);
1406 adapter->flags &= ~FLAG_MSI_ENABLED;
1413 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1415 * Attempt to configure interrupts using the best available
1416 * capabilities of the hardware and kernel.
1418 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1424 switch (adapter->int_mode) {
1425 case E1000E_INT_MODE_MSIX:
1426 if (adapter->flags & FLAG_HAS_MSIX) {
1427 numvecs = 3; /* RxQ0, TxQ0 and other */
1428 adapter->msix_entries = kcalloc(numvecs,
1429 sizeof(struct msix_entry),
1431 if (adapter->msix_entries) {
1432 for (i = 0; i < numvecs; i++)
1433 adapter->msix_entries[i].entry = i;
1435 err = pci_enable_msix(adapter->pdev,
1436 adapter->msix_entries,
1441 /* MSI-X failed, so fall through and try MSI */
1442 e_err("Failed to initialize MSI-X interrupts. "
1443 "Falling back to MSI interrupts.\n");
1444 e1000e_reset_interrupt_capability(adapter);
1446 adapter->int_mode = E1000E_INT_MODE_MSI;
1448 case E1000E_INT_MODE_MSI:
1449 if (!pci_enable_msi(adapter->pdev)) {
1450 adapter->flags |= FLAG_MSI_ENABLED;
1452 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1453 e_err("Failed to initialize MSI interrupts. Falling "
1454 "back to legacy interrupts.\n");
1457 case E1000E_INT_MODE_LEGACY:
1458 /* Don't do anything; this is the system default */
1466 * e1000_request_msix - Initialize MSI-X interrupts
1468 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1471 static int e1000_request_msix(struct e1000_adapter *adapter)
1473 struct net_device *netdev = adapter->netdev;
1474 int err = 0, vector = 0;
1476 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1477 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1479 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1480 err = request_irq(adapter->msix_entries[vector].vector,
1481 &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1485 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1486 adapter->rx_ring->itr_val = adapter->itr;
1489 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1490 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1492 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1493 err = request_irq(adapter->msix_entries[vector].vector,
1494 &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1498 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1499 adapter->tx_ring->itr_val = adapter->itr;
1502 err = request_irq(adapter->msix_entries[vector].vector,
1503 &e1000_msix_other, 0, netdev->name, netdev);
1507 e1000_configure_msix(adapter);
1514 * e1000_request_irq - initialize interrupts
1516 * Attempts to configure interrupts using the best available
1517 * capabilities of the hardware and kernel.
1519 static int e1000_request_irq(struct e1000_adapter *adapter)
1521 struct net_device *netdev = adapter->netdev;
1524 if (adapter->msix_entries) {
1525 err = e1000_request_msix(adapter);
1528 /* fall back to MSI */
1529 e1000e_reset_interrupt_capability(adapter);
1530 adapter->int_mode = E1000E_INT_MODE_MSI;
1531 e1000e_set_interrupt_capability(adapter);
1533 if (adapter->flags & FLAG_MSI_ENABLED) {
1534 err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
1535 netdev->name, netdev);
1539 /* fall back to legacy interrupt */
1540 e1000e_reset_interrupt_capability(adapter);
1541 adapter->int_mode = E1000E_INT_MODE_LEGACY;
	err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
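/*
 * e1000_free_irq - release the IRQ(s) requested by e1000_request_irq(),
 * covering both the MSI-X (Rx, Tx and "other" vectors) and MSI/legacy cases.
 */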
1552 static void e1000_free_irq(struct e1000_adapter *adapter)
1554 struct net_device *netdev = adapter->netdev;
1556 if (adapter->msix_entries) {
1559 free_irq(adapter->msix_entries[vector].vector, netdev);
1562 free_irq(adapter->msix_entries[vector].vector, netdev);
1565 /* Other Causes interrupt vector */
1566 free_irq(adapter->msix_entries[vector].vector, netdev);
1570 free_irq(adapter->pdev->irq, netdev);
1574 * e1000_irq_disable - Mask off interrupt generation on the NIC
1576 static void e1000_irq_disable(struct e1000_adapter *adapter)
1578 struct e1000_hw *hw = &adapter->hw;
1581 if (adapter->msix_entries)
1582 ew32(EIAC_82574, 0);
1584 synchronize_irq(adapter->pdev->irq);
1588 * e1000_irq_enable - Enable default interrupt generation settings
1590 static void e1000_irq_enable(struct e1000_adapter *adapter)
1592 struct e1000_hw *hw = &adapter->hw;
1594 if (adapter->msix_entries) {
1595 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
1596 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
1598 ew32(IMS, IMS_ENABLE_MASK);
1604 * e1000_get_hw_control - get control of the h/w from f/w
1605 * @adapter: address of board private structure
1607 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1608 * For ASF and Pass Through versions of f/w this means that
1609 * the driver is loaded. For AMT version (only with 82573)
1610 * of the f/w this means that the network i/f is open.
1612 static void e1000_get_hw_control(struct e1000_adapter *adapter)
1614 struct e1000_hw *hw = &adapter->hw;
1618 /* Let firmware know the driver has taken over */
1619 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1621 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
1622 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1623 ctrl_ext = er32(CTRL_EXT);
1624 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1629 * e1000_release_hw_control - release control of the h/w to f/w
1630 * @adapter: address of board private structure
1632 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1633 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
1635 * of the f/w this means that the network i/f is closed.
1638 static void e1000_release_hw_control(struct e1000_adapter *adapter)
1640 struct e1000_hw *hw = &adapter->hw;
1644 /* Let firmware taken over control of h/w */
1645 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1647 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
1648 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1649 ctrl_ext = er32(CTRL_EXT);
1650 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
1657 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
1658 struct e1000_ring *ring)
1660 struct pci_dev *pdev = adapter->pdev;
1662 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
1671 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
1672 * @adapter: board private structure
1674 * Return 0 on success, negative on failure
1676 int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1678 struct e1000_ring *tx_ring = adapter->tx_ring;
1679 int err = -ENOMEM, size;
1681 size = sizeof(struct e1000_buffer) * tx_ring->count;
1682 tx_ring->buffer_info = vmalloc(size);
1683 if (!tx_ring->buffer_info)
1685 memset(tx_ring->buffer_info, 0, size);
1687 /* round up to nearest 4K */
1688 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1689 tx_ring->size = ALIGN(tx_ring->size, 4096);
1691 err = e1000_alloc_ring_dma(adapter, tx_ring);
1695 tx_ring->next_to_use = 0;
1696 tx_ring->next_to_clean = 0;
1700 vfree(tx_ring->buffer_info);
1701 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1706 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
1707 * @adapter: board private structure
1709 * Returns 0 on success, negative on failure
1711 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
1713 struct e1000_ring *rx_ring = adapter->rx_ring;
1714 struct e1000_buffer *buffer_info;
1715 int i, size, desc_len, err = -ENOMEM;
1717 size = sizeof(struct e1000_buffer) * rx_ring->count;
1718 rx_ring->buffer_info = vmalloc(size);
1719 if (!rx_ring->buffer_info)
1721 memset(rx_ring->buffer_info, 0, size);
1723 for (i = 0; i < rx_ring->count; i++) {
1724 buffer_info = &rx_ring->buffer_info[i];
1725 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
1726 sizeof(struct e1000_ps_page),
1728 if (!buffer_info->ps_pages)
1732 desc_len = sizeof(union e1000_rx_desc_packet_split);
1734 /* Round up to nearest 4K */
1735 rx_ring->size = rx_ring->count * desc_len;
1736 rx_ring->size = ALIGN(rx_ring->size, 4096);
1738 err = e1000_alloc_ring_dma(adapter, rx_ring);
1742 rx_ring->next_to_clean = 0;
1743 rx_ring->next_to_use = 0;
1744 rx_ring->rx_skb_top = NULL;
1749 for (i = 0; i < rx_ring->count; i++) {
1750 buffer_info = &rx_ring->buffer_info[i];
1751 kfree(buffer_info->ps_pages);
1754 vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
1760 * e1000_clean_tx_ring - Free Tx Buffers
1761 * @adapter: board private structure
1763 static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
1765 struct e1000_ring *tx_ring = adapter->tx_ring;
1766 struct e1000_buffer *buffer_info;
1770 for (i = 0; i < tx_ring->count; i++) {
1771 buffer_info = &tx_ring->buffer_info[i];
1772 e1000_put_txbuf(adapter, buffer_info);
1775 size = sizeof(struct e1000_buffer) * tx_ring->count;
1776 memset(tx_ring->buffer_info, 0, size);
1778 memset(tx_ring->desc, 0, tx_ring->size);
1780 tx_ring->next_to_use = 0;
1781 tx_ring->next_to_clean = 0;
1783 writel(0, adapter->hw.hw_addr + tx_ring->head);
1784 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1788 * e1000e_free_tx_resources - Free Tx Resources per Queue
1789 * @adapter: board private structure
1791 * Free all transmit software resources
1793 void e1000e_free_tx_resources(struct e1000_adapter *adapter)
1795 struct pci_dev *pdev = adapter->pdev;
1796 struct e1000_ring *tx_ring = adapter->tx_ring;
1798 e1000_clean_tx_ring(adapter);
1800 vfree(tx_ring->buffer_info);
1801 tx_ring->buffer_info = NULL;
1803 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1805 tx_ring->desc = NULL;
1809 * e1000e_free_rx_resources - Free Rx Resources
1810 * @adapter: board private structure
1812 * Free all receive software resources
1815 void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1817 struct pci_dev *pdev = adapter->pdev;
1818 struct e1000_ring *rx_ring = adapter->rx_ring;
1821 e1000_clean_rx_ring(adapter);
1823 for (i = 0; i < rx_ring->count; i++) {
1824 kfree(rx_ring->buffer_info[i].ps_pages);
1827 vfree(rx_ring->buffer_info);
1828 rx_ring->buffer_info = NULL;
1830 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1832 rx_ring->desc = NULL;
1836 * e1000_update_itr - update the dynamic ITR value based on statistics
1837 * @adapter: pointer to adapter
1838 * @itr_setting: current adapter->itr
1839 * @packets: the number of packets during this measurement interval
1840 * @bytes: the number of bytes during this measurement interval
1842 * Stores a new ITR value based on packets and byte
1843 * counts during the last interrupt. The advantage of per interrupt
1844 * computation is faster updates and more accurate ITR for the current
1845 * traffic pattern. Constants in this function were computed
1846 * based on theoretical maximum wire speed and thresholds were set based
1847 * on testing data as well as attempting to minimize response time
1848 * while increasing bulk throughput. This functionality is controlled
1849 * by the InterruptThrottleRate module parameter.
1851 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1852 u16 itr_setting, int packets,
1855 unsigned int retval = itr_setting;
1858 goto update_itr_done;
1860 switch (itr_setting) {
1861 case lowest_latency:
1862 /* handle TSO and jumbo frames */
1863 if (bytes/packets > 8000)
1864 retval = bulk_latency;
1865 else if ((packets < 5) && (bytes > 512)) {
1866 retval = low_latency;
1869 case low_latency: /* 50 usec aka 20000 ints/s */
1870 if (bytes > 10000) {
1871 /* this if handles the TSO accounting */
1872 if (bytes/packets > 8000) {
1873 retval = bulk_latency;
1874 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
1875 retval = bulk_latency;
1876 } else if ((packets > 35)) {
1877 retval = lowest_latency;
1879 } else if (bytes/packets > 2000) {
1880 retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
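/*
 * e1000_set_itr - derive a new interrupt throttle rate from the Tx and Rx
 * traffic seen since the last interrupt and program it into the hardware
 * (deferred to the next Rx MSI-X interrupt when MSI-X is in use).
 */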
1900 static void e1000_set_itr(struct e1000_adapter *adapter)
1902 struct e1000_hw *hw = &adapter->hw;
1904 u32 new_itr = adapter->itr;
1906 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
1907 if (adapter->link_speed != SPEED_1000) {
1913 adapter->tx_itr = e1000_update_itr(adapter,
1915 adapter->total_tx_packets,
1916 adapter->total_tx_bytes);
1917 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1918 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
1919 adapter->tx_itr = low_latency;
1921 adapter->rx_itr = e1000_update_itr(adapter,
1923 adapter->total_rx_packets,
1924 adapter->total_rx_bytes);
1925 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1926 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
1927 adapter->rx_itr = low_latency;
1929 current_itr = max(adapter->rx_itr, adapter->tx_itr);
1931 switch (current_itr) {
1932 /* counts and packets in update_itr are dependent on these numbers */
1933 case lowest_latency:
1937 new_itr = 20000; /* aka hwitr = ~200 */
1947 if (new_itr != adapter->itr) {
1949 * this attempts to bias the interrupt rate towards Bulk
1950 * by adding intermediate steps when interrupt rate is
1953 new_itr = new_itr > adapter->itr ?
1954 min(adapter->itr + (new_itr >> 2), new_itr) :
1956 adapter->itr = new_itr;
1957 adapter->rx_ring->itr_val = new_itr;
1958 if (adapter->msix_entries)
1959 adapter->rx_ring->set_itr = 1;
1961 ew32(ITR, 1000000000 / (new_itr * 256));
1966 * e1000_alloc_queues - Allocate memory for all rings
1967 * @adapter: board private structure to initialize
1969 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1971 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	return 0;
err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}
1988 * e1000_clean - NAPI Rx polling callback
1989 * @napi: struct associated with this polling callback
1990 * @budget: amount of packets driver is allowed to process this poll
1992 static int e1000_clean(struct napi_struct *napi, int budget)
1994 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
1995 struct e1000_hw *hw = &adapter->hw;
1996 struct net_device *poll_dev = adapter->netdev;
1997 int tx_cleaned = 0, work_done = 0;
1999 adapter = netdev_priv(poll_dev);
2001 if (adapter->msix_entries &&
2002 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2005 tx_cleaned = e1000_clean_tx_irq(adapter);
2008 adapter->clean_rx(adapter, &work_done, budget);
2013 /* If budget not fully consumed, exit the polling mode */
2014 if (work_done < budget) {
2015 if (adapter->itr_setting & 3)
2016 e1000_set_itr(adapter);
2017 napi_complete(napi);
		if (adapter->msix_entries)
			ew32(IMS, adapter->rx_ring->ims_val);
		else
			e1000_irq_enable(adapter);
	}

	return work_done;
}
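/* Add the given VLAN ID to the hardware VLAN filter table (VFTA) */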
2027 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2029 struct e1000_adapter *adapter = netdev_priv(netdev);
2030 struct e1000_hw *hw = &adapter->hw;
2033 /* don't update vlan cookie if already programmed */
2034 if ((adapter->hw.mng_cookie.status &
2035 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2036 (vid == adapter->mng_vlan_id))
2038 /* add VID to filter table */
2039 index = (vid >> 5) & 0x7F;
2040 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2041 vfta |= (1 << (vid & 0x1F));
2042 e1000e_write_vfta(hw, index, vfta);
2045 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2047 struct e1000_adapter *adapter = netdev_priv(netdev);
2048 struct e1000_hw *hw = &adapter->hw;
2051 if (!test_bit(__E1000_DOWN, &adapter->state))
2052 e1000_irq_disable(adapter);
2053 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2055 if (!test_bit(__E1000_DOWN, &adapter->state))
2056 e1000_irq_enable(adapter);
2058 if ((adapter->hw.mng_cookie.status &
2059 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2060 (vid == adapter->mng_vlan_id)) {
2061 /* release control to f/w */
2062 e1000_release_hw_control(adapter);
2066 /* remove VID from filter table */
2067 index = (vid >> 5) & 0x7F;
2068 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2069 vfta &= ~(1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}
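/*
 * e1000_update_mng_vlan - keep the manageability (firmware cookie) VLAN in
 * sync with the current VLAN group, adding the new ID and dropping a stale one.
 */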
2073 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2075 struct net_device *netdev = adapter->netdev;
2076 u16 vid = adapter->hw.mng_cookie.vlan_id;
2077 u16 old_vid = adapter->mng_vlan_id;
2079 if (!adapter->vlgrp)
2082 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
2083 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2084 if (adapter->hw.mng_cookie.status &
2085 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2086 e1000_vlan_rx_add_vid(netdev, vid);
2087 adapter->mng_vlan_id = vid;
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !vlan_group_get_device(adapter->vlgrp, old_vid))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
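/*
 * e1000_vlan_rx_register - enable or disable hardware VLAN tag
 * insertion/stripping (and receive filtering where supported) as the
 * VLAN group is registered or torn down.
 */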
2100 static void e1000_vlan_rx_register(struct net_device *netdev,
2101 struct vlan_group *grp)
2103 struct e1000_adapter *adapter = netdev_priv(netdev);
2104 struct e1000_hw *hw = &adapter->hw;
2107 if (!test_bit(__E1000_DOWN, &adapter->state))
2108 e1000_irq_disable(adapter);
2109 adapter->vlgrp = grp;
2112 /* enable VLAN tag insert/strip */
2114 ctrl |= E1000_CTRL_VME;
2117 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2118 /* enable VLAN receive filtering */
2120 rctl &= ~E1000_RCTL_CFIEN;
2122 e1000_update_mng_vlan(adapter);
2125 /* disable VLAN tag insert/strip */
2127 ctrl &= ~E1000_CTRL_VME;
2130 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2131 if (adapter->mng_vlan_id !=
2132 (u16)E1000_MNG_VLAN_NONE) {
2133 e1000_vlan_rx_kill_vid(netdev,
2134 adapter->mng_vlan_id);
2135 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_enable(adapter);
}
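/* Re-register the VLAN group and re-add all active VLAN IDs after a reset */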
2144 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2148 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2150 if (!adapter->vlgrp)
2153 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2154 if (!vlan_group_get_device(adapter->vlgrp, vid))
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
	}
}
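/*
 * e1000_init_manageability - when manageability pass-through is enabled,
 * allow management packets (e.g. ports 623/664) to be routed to the host.
 */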
2160 static void e1000_init_manageability(struct e1000_adapter *adapter)
2162 struct e1000_hw *hw = &adapter->hw;
2165 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2171 * enable receiving management packets to the host. this will probably
2172 * generate destination unreachable messages from the host OS, but
2173 * the packets will be handled on SMBUS
2175 manc |= E1000_MANC_EN_MNG2HOST;
2176 manc2h = er32(MANC2H);
2177 #define E1000_MNG2HOST_PORT_623 (1 << 5)
2178 #define E1000_MNG2HOST_PORT_664 (1 << 6)
2179 manc2h |= E1000_MNG2HOST_PORT_623;
2180 manc2h |= E1000_MNG2HOST_PORT_664;
2181 ew32(MANC2H, manc2h);
2186 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
2187 * @adapter: board private structure
2189 * Configure the Tx unit of the MAC after a reset.
2191 static void e1000_configure_tx(struct e1000_adapter *adapter)
2193 struct e1000_hw *hw = &adapter->hw;
2194 struct e1000_ring *tx_ring = adapter->tx_ring;
2196 u32 tdlen, tctl, tipg, tarc;
2199 /* Setup the HW Tx Head and Tail descriptor pointers */
2200 tdba = tx_ring->dma;
2201 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2202 ew32(TDBAL, (tdba & DMA_32BIT_MASK));
2203 ew32(TDBAH, (tdba >> 32));
2207 tx_ring->head = E1000_TDH;
2208 tx_ring->tail = E1000_TDT;
2210 /* Set the default values for the Tx Inter Packet Gap timer */
2211 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2212 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2213 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2215 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2216 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2218 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2219 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2222 /* Set the Tx Interrupt Delay register */
2223 ew32(TIDV, adapter->tx_int_delay);
2224 /* Tx irq moderation */
2225 ew32(TADV, adapter->tx_abs_int_delay);
2227 /* Program the Transmit Control Register */
2229 tctl &= ~E1000_TCTL_CT;
2230 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2231 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2233 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2234 tarc = er32(TARC(0));
2236 * set the speed mode bit, we'll clear it if we're not at
2237 * gigabit link later
2239 #define SPEED_MODE_BIT (1 << 21)
2240 tarc |= SPEED_MODE_BIT;
2241 ew32(TARC(0), tarc);
2244 /* errata: program both queues to unweighted RR */
2245 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2246 tarc = er32(TARC(0));
2248 ew32(TARC(0), tarc);
2249 tarc = er32(TARC(1));
2251 ew32(TARC(1), tarc);
2254 e1000e_config_collision_dist(hw);
2256 /* Setup Transmit Descriptor Settings for eop descriptor */
2257 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2259 /* only set IDE if we are delaying interrupts using the timers */
2260 if (adapter->tx_int_delay)
2261 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2263 /* enable Report Status bit */
2264 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2268 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
2272 * e1000_setup_rctl - configure the receive control registers
2273 * @adapter: Board private structure
2275 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2276 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2277 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2279 struct e1000_hw *hw = &adapter->hw;
2284 /* Program MC offset vector base */
2286 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2287 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2288 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2289 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2291 /* Do not Store bad packets */
2292 rctl &= ~E1000_RCTL_SBP;
2294 /* Enable Long Packet receive */
2295 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2296 rctl &= ~E1000_RCTL_LPE;
2298 rctl |= E1000_RCTL_LPE;
2300 /* Some systems expect that the CRC is included in SMBUS traffic. The
2301 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2302 * host memory when this is enabled
2304 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2305 rctl |= E1000_RCTL_SECRC;
2307 /* Setup buffer sizes */
2308 rctl &= ~E1000_RCTL_SZ_4096;
2309 rctl |= E1000_RCTL_BSEX;
2310 switch (adapter->rx_buffer_len) {
2312 rctl |= E1000_RCTL_SZ_256;
2313 rctl &= ~E1000_RCTL_BSEX;
2316 rctl |= E1000_RCTL_SZ_512;
2317 rctl &= ~E1000_RCTL_BSEX;
2320 rctl |= E1000_RCTL_SZ_1024;
2321 rctl &= ~E1000_RCTL_BSEX;
2325 rctl |= E1000_RCTL_SZ_2048;
2326 rctl &= ~E1000_RCTL_BSEX;
2329 rctl |= E1000_RCTL_SZ_4096;
2332 rctl |= E1000_RCTL_SZ_8192;
2335 rctl |= E1000_RCTL_SZ_16384;
2340 * 82571 and greater support packet-split where the protocol
2341 * header is placed in skb->data and the packet data is
2342 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2343 * In the case of a non-split, skb->data is linearly filled,
2344 * followed by the page buffers. Therefore, skb->data is
2345 * sized to hold the largest protocol header.
2347 * allocations using alloc_page take too long for regular MTU
2348 * so only enable packet split for jumbo frames
2350 * Using pages when the page size is greater than 16k wastes
2351 * a lot of memory, since we allocate 3 pages at all times
2354 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2355 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
2356 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2357 adapter->rx_ps_pages = pages;
2359 adapter->rx_ps_pages = 0;
2361 if (adapter->rx_ps_pages) {
2362 /* Configure extra packet-split registers */
2363 rfctl = er32(RFCTL);
2364 rfctl |= E1000_RFCTL_EXTEN;
2366 * disable packet split support for IPv6 extension headers,
2367 * because some malformed IPv6 headers can hang the Rx
2369 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2370 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2374 /* Enable Packet split descriptors */
2375 rctl |= E1000_RCTL_DTYP_PS;
2377 psrctl |= adapter->rx_ps_bsize0 >>
2378 E1000_PSRCTL_BSIZE0_SHIFT;
2380 switch (adapter->rx_ps_pages) {
2382 psrctl |= PAGE_SIZE <<
2383 E1000_PSRCTL_BSIZE3_SHIFT;
2385 psrctl |= PAGE_SIZE <<
2386 E1000_PSRCTL_BSIZE2_SHIFT;
2388 psrctl |= PAGE_SIZE >>
2389 E1000_PSRCTL_BSIZE1_SHIFT;
2393 ew32(PSRCTL, psrctl);
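/*
 * Note: PSRCTL carries one size field per packet-split buffer. BSIZE0 is
 * the header buffer (rx_ps_bsize0, 128 bytes here) and BSIZE1-3 describe
 * the page buffers; each field uses its own granularity, which is why
 * PAGE_SIZE is shifted by different amounts (and directions) above.
 */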
2397 /* just started the receive unit, no need to restart */
2398 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2402 * e1000_configure_rx - Configure Receive Unit after Reset
2403 * @adapter: board private structure
2405 * Configure the Rx unit of the MAC after a reset.
2407 static void e1000_configure_rx(struct e1000_adapter *adapter)
2409 struct e1000_hw *hw = &adapter->hw;
2410 struct e1000_ring *rx_ring = adapter->rx_ring;
2412 u32 rdlen, rctl, rxcsum, ctrl_ext;
2414 if (adapter->rx_ps_pages) {
2415 /* this is a 32 byte descriptor */
2416 rdlen = rx_ring->count *
2417 sizeof(union e1000_rx_desc_packet_split);
2418 adapter->clean_rx = e1000_clean_rx_irq_ps;
2419 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2420 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2421 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2422 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2423 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
2425 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2426 adapter->clean_rx = e1000_clean_rx_irq;
2427 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2430 /* disable receives while setting up the descriptors */
2432 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2436 /* set the Receive Delay Timer Register */
2437 ew32(RDTR, adapter->rx_int_delay);
2439 /* irq moderation */
2440 ew32(RADV, adapter->rx_abs_int_delay);
2441 if (adapter->itr_setting != 0)
2442 ew32(ITR, 1000000000 / (adapter->itr * 256));
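/*
 * Note: adapter->itr is a rate in interrupts/second, while the ITR register
 * takes the minimum inter-interrupt interval in 256 ns units, hence
 * 10^9 / (itr * 256). For example, itr = 8000 writes 488, i.e. roughly
 * 125 us between interrupts.
 */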
2444 ctrl_ext = er32(CTRL_EXT);
2445 /* Reset delay timers after every interrupt */
2446 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2447 /* Auto-Mask interrupts upon ICR access */
2448 ctrl_ext |= E1000_CTRL_EXT_IAME;
2449 ew32(IAM, 0xffffffff);
2450 ew32(CTRL_EXT, ctrl_ext);
2454 * Setup the HW Rx Head and Tail Descriptor Pointers and
2455 * the Base and Length of the Rx Descriptor Ring
2457 rdba = rx_ring->dma;
2458 ew32(RDBAL, (rdba & DMA_32BIT_MASK));
2459 ew32(RDBAH, (rdba >> 32));
2463 rx_ring->head = E1000_RDH;
2464 rx_ring->tail = E1000_RDT;
2466 /* Enable Receive Checksum Offload for TCP and UDP */
2467 rxcsum = er32(RXCSUM);
2468 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2469 rxcsum |= E1000_RXCSUM_TUOFL;
2472 * IPv4 payload checksum for UDP fragments must be
2473 * used in conjunction with packet-split.
2475 if (adapter->rx_ps_pages)
2476 rxcsum |= E1000_RXCSUM_IPPCSE;
2478 rxcsum &= ~E1000_RXCSUM_TUOFL;
2479 /* no need to clear IPPCSE as it defaults to 0 */
2481 ew32(RXCSUM, rxcsum);
2484 * Enable early receives on supported devices; this only takes effect when
2485 * the packet size is equal to or larger than the specified value (in 8-byte
2486 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2488 if ((adapter->flags & FLAG_HAS_ERT) &&
2489 (adapter->netdev->mtu > ETH_DATA_LEN)) {
2490 u32 rxdctl = er32(RXDCTL(0));
2491 ew32(RXDCTL(0), rxdctl | 0x3);
2492 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2494 * With jumbo frames and early-receive enabled, excessive
2495 * C4->C2 latencies result in dropped transactions.
2497 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2498 e1000e_driver_name, 55);
2500 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2502 PM_QOS_DEFAULT_VALUE);
2505 /* Enable Receives */
2510 * e1000_update_mc_addr_list - Update Multicast addresses
2511 * @hw: pointer to the HW structure
2512 * @mc_addr_list: array of multicast addresses to program
2513 * @mc_addr_count: number of multicast addresses to program
2514 * @rar_used_count: the first RAR register free to program
2515 * @rar_count: total number of supported Receive Address Registers
2517 * Updates the Receive Address Registers and Multicast Table Array.
2518 * The caller must have a packed mc_addr_list of multicast addresses.
2519 * The parameter rar_count will usually be hw->mac.rar_entry_count
2520 * unless there are workarounds that change this. Currently no func pointer
2521 * exists and all implementations are handled in the generic version of this
2524 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2525 u32 mc_addr_count, u32 rar_used_count,
2528 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2529 rar_used_count, rar_count);
2533 * e1000_set_multi - Multicast and Promiscuous mode set
2534 * @netdev: network interface device structure
2536 * The set_multi entry point is called whenever the multicast address
2537 * list or the network interface flags are updated. This routine is
2538 * responsible for configuring the hardware for proper multicast,
2539 * promiscuous mode, and all-multi behavior.
2541 static void e1000_set_multi(struct net_device *netdev)
2543 struct e1000_adapter *adapter = netdev_priv(netdev);
2544 struct e1000_hw *hw = &adapter->hw;
2545 struct e1000_mac_info *mac = &hw->mac;
2546 struct dev_mc_list *mc_ptr;
2551 /* Check for Promiscuous and All Multicast modes */
2555 if (netdev->flags & IFF_PROMISC) {
2556 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2557 rctl &= ~E1000_RCTL_VFE;
2559 if (netdev->flags & IFF_ALLMULTI) {
2560 rctl |= E1000_RCTL_MPE;
2561 rctl &= ~E1000_RCTL_UPE;
2563 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2565 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
2566 rctl |= E1000_RCTL_VFE;
2571 if (netdev->mc_count) {
2572 mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
2576 /* prepare a packed array of only addresses. */
2577 mc_ptr = netdev->mc_list;
2579 for (i = 0; i < netdev->mc_count; i++) {
2582 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
2584 mc_ptr = mc_ptr->next;
2587 e1000_update_mc_addr_list(hw, mta_list, i, 1,
2588 mac->rar_entry_count);
2592 * if we're called from probe, we might not have
2593 * anything to do here, so clear out the list
2595 e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
2600 * e1000_configure - configure the hardware for Rx and Tx
2601 * @adapter: private board structure
2603 static void e1000_configure(struct e1000_adapter *adapter)
2605 e1000_set_multi(adapter->netdev);
2607 e1000_restore_vlan(adapter);
2608 e1000_init_manageability(adapter);
2610 e1000_configure_tx(adapter);
2611 e1000_setup_rctl(adapter);
2612 e1000_configure_rx(adapter);
2613 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
2617 * e1000e_power_up_phy - restore link in case the phy was powered down
2618 * @adapter: address of board private structure
2620 * The phy may be powered down to save power and turn off link when the
2621 * driver is unloaded and wake on lan is not enabled (among others)
2622 * *** this routine MUST be followed by a call to e1000e_reset ***
2624 void e1000e_power_up_phy(struct e1000_adapter *adapter)
2628 /* Just clear the power down bit to wake the phy back up */
2629 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
2631 * According to the manual, the phy will retain its
2632 * settings across a power-down/up cycle
2634 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
2635 mii_reg &= ~MII_CR_POWER_DOWN;
2636 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
2639 adapter->hw.mac.ops.setup_link(&adapter->hw);
2643 * e1000_power_down_phy - Power down the PHY
2645 * Power down the PHY so no link is implied when interface is down
2646 * The PHY cannot be powered down if management or WoL is active
2648 static void e1000_power_down_phy(struct e1000_adapter *adapter)
2650 struct e1000_hw *hw = &adapter->hw;
2653 /* WoL is enabled */
2657 /* non-copper PHY? */
2658 if (adapter->hw.phy.media_type != e1000_media_type_copper)
2661 /* reset is blocked because of a SoL/IDER session */
2662 if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
2665 /* manageability (AMT) is enabled */
2666 if (er32(MANC) & E1000_MANC_SMBUS_EN)
2669 /* power down the PHY */
2670 e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2671 mii_reg |= MII_CR_POWER_DOWN;
2672 e1e_wphy(hw, PHY_CONTROL, mii_reg);
2677 * e1000e_reset - bring the hardware into a known good state
2679 * This function boots the hardware and enables some settings that
2680 * require a configuration cycle of the hardware - those cannot be
2681 * set/changed during runtime. After reset the device needs to be
2682 * properly configured for Rx, Tx etc.
2684 void e1000e_reset(struct e1000_adapter *adapter)
2686 struct e1000_mac_info *mac = &adapter->hw.mac;
2687 struct e1000_fc_info *fc = &adapter->hw.fc;
2688 struct e1000_hw *hw = &adapter->hw;
2689 u32 tx_space, min_tx_space, min_rx_space;
2690 u32 pba = adapter->pba;
2693 /* reset Packet Buffer Allocation to default */
2696 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
2698 * To maintain wire speed transmits, the Tx FIFO should be
2699 * large enough to accommodate two full transmit packets,
2700 * rounded up to the next 1KB and expressed in KB. Likewise,
2701 * the Rx FIFO should be large enough to accommodate at least
2702 * one full receive packet and is similarly rounded up and expressed in KB.
2706 /* upper 16 bits has Tx packet buffer allocation size in KB */
2707 tx_space = pba >> 16;
2708 /* lower 16 bits has Rx packet buffer allocation size in KB */
2711 * the Tx FIFO also stores 16 bytes of information about the Tx packet,
2712 * but do not include the Ethernet FCS because the hardware appends it
2714 min_tx_space = (adapter->max_frame_size +
2715 sizeof(struct e1000_tx_desc) -
2717 min_tx_space = ALIGN(min_tx_space, 1024);
2718 min_tx_space >>= 10;
2719 /* software strips receive CRC, so leave room for it */
2720 min_rx_space = adapter->max_frame_size;
2721 min_rx_space = ALIGN(min_rx_space, 1024);
2722 min_rx_space >>= 10;
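/*
 * Worked example (illustrative): a 9018-byte jumbo max frame gives
 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB), while min_tx_space must
 * cover two such frames plus the 16-byte descriptor info (minus the FCS the
 * hardware appends), rounded up to the next KB the same way.
 */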
2725 * If current Tx allocation is less than the min Tx FIFO size,
2726 * and the min Tx FIFO size is less than the current Rx FIFO
2727 * allocation, take space away from current Rx allocation
2729 if ((tx_space < min_tx_space) &&
2730 ((min_tx_space - tx_space) < pba)) {
2731 pba -= min_tx_space - tx_space;
2734 * if short on Rx space, Rx wins and must trump tx
2735 * adjustment or use Early Receive if available
2737 if ((pba < min_rx_space) &&
2738 (!(adapter->flags & FLAG_HAS_ERT)))
2739 /* ERT enabled in e1000_configure_rx */
2748 * flow control settings
2750 * The high water mark must be low enough to fit one full frame
2751 * (or the size used for early receive) above it in the Rx FIFO.
2752 * Set it to the lower of:
2753 * - 90% of the Rx FIFO size, and
2754 * - the full Rx FIFO size minus the early receive size (for parts
2755 * with ERT support assuming ERT set to E1000_ERT_2048), or
2756 * - the full Rx FIFO size minus one full frame
2758 if (adapter->flags & FLAG_HAS_ERT)
2759 hwm = min(((pba << 10) * 9 / 10),
2760 ((pba << 10) - (E1000_ERT_2048 << 3)));
2762 hwm = min(((pba << 10) * 9 / 10),
2763 ((pba << 10) - adapter->max_frame_size));
2765 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
2766 fc->low_water = fc->high_water - 8;
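/*
 * Example: a 20 KB Rx FIFO (pba = 20) without ERT and a 1518-byte frame
 * gives hwm = min(20480 * 9 / 10, 20480 - 1518) = 18432, so high_water =
 * 18432 and low_water = 18424; masking with 0xFFF8 keeps the value on the
 * 8-byte boundary the hardware requires.
 */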
2768 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2769 fc->pause_time = 0xFFFF;
2771 fc->pause_time = E1000_FC_PAUSE_TIME;
2773 fc->current_mode = fc->requested_mode;
2775 /* Allow time for pending master requests to run */
2776 mac->ops.reset_hw(hw);
2779 * For parts with AMT enabled, let the firmware know
2780 * that the network interface is in control
2782 if (adapter->flags & FLAG_HAS_AMT)
2783 e1000_get_hw_control(adapter);
2787 if (mac->ops.init_hw(hw))
2788 e_err("Hardware Error\n");
2790 e1000_update_mng_vlan(adapter);
2792 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2793 ew32(VET, ETH_P_8021Q);
2795 e1000e_reset_adaptive(hw);
2796 e1000_get_phy_info(hw);
2798 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2801 * speed up time to link by disabling smart power down, ignore
2802 * the return value of this function because there is nothing
2803 * different we would do if it failed
2805 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2806 phy_data &= ~IGP02E1000_PM_SPD;
2807 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2811 int e1000e_up(struct e1000_adapter *adapter)
2813 struct e1000_hw *hw = &adapter->hw;
2815 /* hardware has been reset, we need to reload some things */
2816 e1000_configure(adapter);
2818 clear_bit(__E1000_DOWN, &adapter->state);
2820 napi_enable(&adapter->napi);
2821 if (adapter->msix_entries)
2822 e1000_configure_msix(adapter);
2823 e1000_irq_enable(adapter);
2825 /* fire a link change interrupt to start the watchdog */
2826 ew32(ICS, E1000_ICS_LSC);
2830 void e1000e_down(struct e1000_adapter *adapter)
2832 struct net_device *netdev = adapter->netdev;
2833 struct e1000_hw *hw = &adapter->hw;
2837 * signal that we're down so the interrupt handler does not
2838 * reschedule our watchdog timer
2840 set_bit(__E1000_DOWN, &adapter->state);
2842 /* disable receives in the hardware */
2844 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2845 /* flush and sleep below */
2847 netif_tx_stop_all_queues(netdev);
2849 /* disable transmits in the hardware */
2851 tctl &= ~E1000_TCTL_EN;
2853 /* flush both disables and wait for them to finish */
2857 napi_disable(&adapter->napi);
2858 e1000_irq_disable(adapter);
2860 del_timer_sync(&adapter->watchdog_timer);
2861 del_timer_sync(&adapter->phy_info_timer);
2863 netdev->tx_queue_len = adapter->tx_queue_len;
2864 netif_carrier_off(netdev);
2865 adapter->link_speed = 0;
2866 adapter->link_duplex = 0;
2868 if (!pci_channel_offline(adapter->pdev))
2869 e1000e_reset(adapter);
2870 e1000_clean_tx_ring(adapter);
2871 e1000_clean_rx_ring(adapter);
2874 * TODO: for power management, we could drop the link and
2875 * pci_disable_device here.
2879 void e1000e_reinit_locked(struct e1000_adapter *adapter)
2882 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
2884 e1000e_down(adapter);
2886 clear_bit(__E1000_RESETTING, &adapter->state);
2890 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2891 * @adapter: board private structure to initialize
2893 * e1000_sw_init initializes the Adapter private data structure.
2894 * Fields are initialized based on PCI device information and
2895 * OS network device settings (MTU size).
2897 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2899 struct net_device *netdev = adapter->netdev;
2901 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2902 adapter->rx_ps_bsize0 = 128;
2903 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2904 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
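/*
 * Note: with the standard kernel constants (ETH_FRAME_LEN = 1514,
 * VLAN_HLEN = 4, ETH_FCS_LEN = 4, ETH_HLEN = 14, ETH_ZLEN = 60) this sets
 * up a 1522-byte default Rx buffer, max_frame_size = MTU + 18, and a
 * 64-byte minimum frame.
 */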
2906 e1000e_set_interrupt_capability(adapter);
2908 if (e1000_alloc_queues(adapter))
2911 /* Explicitly disable IRQ since the NIC can be in any state. */
2912 e1000_irq_disable(adapter);
2914 set_bit(__E1000_DOWN, &adapter->state);
2919 * e1000_intr_msi_test - Interrupt Handler
2920 * @irq: interrupt number
2921 * @data: pointer to a network interface device structure
2923 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
2925 struct net_device *netdev = data;
2926 struct e1000_adapter *adapter = netdev_priv(netdev);
2927 struct e1000_hw *hw = &adapter->hw;
2928 u32 icr = er32(ICR);
2930 e_dbg("%s: icr is %08X\n", netdev->name, icr);
2931 if (icr & E1000_ICR_RXSEQ) {
2932 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
2940 * e1000_test_msi_interrupt - Returns 0 for successful test
2941 * @adapter: board private struct
2943 * code flow taken from tg3.c
2945 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2947 struct net_device *netdev = adapter->netdev;
2948 struct e1000_hw *hw = &adapter->hw;
2951 /* poll_enable hasn't been called yet, so don't need disable */
2952 /* clear any pending events */
2955 /* free the real vector and request a test handler */
2956 e1000_free_irq(adapter);
2957 e1000e_reset_interrupt_capability(adapter);
2959 /* Assume that the test fails; if it succeeds, the test
2960 * MSI irq handler will unset this flag */
2961 adapter->flags |= FLAG_MSI_TEST_FAILED;
2963 err = pci_enable_msi(adapter->pdev);
2965 goto msi_test_failed;
2967 err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
2968 netdev->name, netdev);
2970 pci_disable_msi(adapter->pdev);
2971 goto msi_test_failed;
2976 e1000_irq_enable(adapter);
2978 /* fire an unusual interrupt on the test handler */
2979 ew32(ICS, E1000_ICS_RXSEQ);
2983 e1000_irq_disable(adapter);
2987 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
2988 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2990 e_info("MSI interrupt test failed!\n");
2993 free_irq(adapter->pdev->irq, netdev);
2994 pci_disable_msi(adapter->pdev);
2997 goto msi_test_failed;
2999 /* okay so the test worked, restore settings */
3000 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
3002 e1000e_set_interrupt_capability(adapter);
3003 e1000_request_irq(adapter);
3008 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3009 * @adapter: board private struct
3011 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3013 static int e1000_test_msi(struct e1000_adapter *adapter)
3018 if (!(adapter->flags & FLAG_MSI_ENABLED))
3021 /* disable SERR in case the MSI write causes a master abort */
3022 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3023 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3024 pci_cmd & ~PCI_COMMAND_SERR);
3026 err = e1000_test_msi_interrupt(adapter);
3028 /* restore previous setting of command word */
3029 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3035 /* EIO means MSI test failed */
3039 /* back to INTx mode */
3040 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3042 e1000_free_irq(adapter);
3044 err = e1000_request_irq(adapter);
3050 * e1000_open - Called when a network interface is made active
3051 * @netdev: network interface device structure
3053 * Returns 0 on success, negative value on failure
3055 * The open entry point is called when a network interface is made
3056 * active by the system (IFF_UP). At this point all resources needed
3057 * for transmit and receive operations are allocated, the interrupt
3058 * handler is registered with the OS, the watchdog timer is started,
3059 * and the stack is notified that the interface is ready.
3061 static int e1000_open(struct net_device *netdev)
3063 struct e1000_adapter *adapter = netdev_priv(netdev);
3064 struct e1000_hw *hw = &adapter->hw;
3067 /* disallow open during test */
3068 if (test_bit(__E1000_TESTING, &adapter->state))
3071 /* allocate transmit descriptors */
3072 err = e1000e_setup_tx_resources(adapter);
3076 /* allocate receive descriptors */
3077 err = e1000e_setup_rx_resources(adapter);
3081 e1000e_power_up_phy(adapter);
3083 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3084 if ((adapter->hw.mng_cookie.status &
3085 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3086 e1000_update_mng_vlan(adapter);
3089 * If AMT is enabled, let the firmware know that the network
3090 * interface is now open
3092 if (adapter->flags & FLAG_HAS_AMT)
3093 e1000_get_hw_control(adapter);
3096 * before we allocate an interrupt, we must be ready to handle it.
3097 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3098 * as soon as we call pci_request_irq, so we have to setup our
3099 * clean_rx handler before we do so.
3101 e1000_configure(adapter);
3103 err = e1000_request_irq(adapter);
3108 * Work around PCIe errata with MSI interrupts causing some chipsets to
3109 * ignore e1000e MSI messages, which means we need to test our MSI
3112 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3113 err = e1000_test_msi(adapter);
3115 e_err("Interrupt allocation failed\n");
3120 /* From here on the code is the same as e1000e_up() */
3121 clear_bit(__E1000_DOWN, &adapter->state);
3123 napi_enable(&adapter->napi);
3125 e1000_irq_enable(adapter);
3127 netif_tx_start_all_queues(netdev);
3129 /* fire a link status change interrupt to start the watchdog */
3130 ew32(ICS, E1000_ICS_LSC);
3135 e1000_release_hw_control(adapter);
3136 e1000_power_down_phy(adapter);
3137 e1000e_free_rx_resources(adapter);
3139 e1000e_free_tx_resources(adapter);
3141 e1000e_reset(adapter);
3147 * e1000_close - Disables a network interface
3148 * @netdev: network interface device structure
3150 * Returns 0, this is not allowed to fail
3152 * The close entry point is called when an interface is de-activated
3153 * by the OS. The hardware is still under the drivers control, but
3154 * needs to be disabled. A global MAC reset is issued to stop the
3155 * hardware, and all transmit and receive resources are freed.
3157 static int e1000_close(struct net_device *netdev)
3159 struct e1000_adapter *adapter = netdev_priv(netdev);
3161 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3162 e1000e_down(adapter);
3163 e1000_power_down_phy(adapter);
3164 e1000_free_irq(adapter);
3166 e1000e_free_tx_resources(adapter);
3167 e1000e_free_rx_resources(adapter);
3170 * kill manageability vlan ID if supported, but not if a vlan with
3171 * the same ID is registered on the host OS (let 8021q kill it)
3173 if ((adapter->hw.mng_cookie.status &
3174 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3176 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3177 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3180 * If AMT is enabled, let the firmware know that the network
3181 * interface is now closed
3183 if (adapter->flags & FLAG_HAS_AMT)
3184 e1000_release_hw_control(adapter);
3189 * e1000_set_mac - Change the Ethernet Address of the NIC
3190 * @netdev: network interface device structure
3191 * @p: pointer to an address structure
3193 * Returns 0 on success, negative on failure
3195 static int e1000_set_mac(struct net_device *netdev, void *p)
3197 struct e1000_adapter *adapter = netdev_priv(netdev);
3198 struct sockaddr *addr = p;
3200 if (!is_valid_ether_addr(addr->sa_data))
3201 return -EADDRNOTAVAIL;
3203 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3204 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3206 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3208 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3209 /* activate the work around */
3210 e1000e_set_laa_state_82571(&adapter->hw, 1);
3213 * Hold a copy of the LAA in RAR[14] This is done so that
3214 * between the time RAR[0] gets clobbered and the time it
3215 * gets fixed (in e1000_watchdog), the actual LAA is in one
3216 * of the RARs and no incoming packets directed to this port
3217 * are dropped. Eventually the LAA will be in RAR[0] and
3220 e1000e_rar_set(&adapter->hw,
3221 adapter->hw.mac.addr,
3222 adapter->hw.mac.rar_entry_count - 1);
3229 * e1000e_update_phy_task - work thread to update phy
3230 * @work: pointer to our work struct
3232 * this worker thread exists because we must acquire a
3233 * semaphore to read the PHY, which may require us to msleep while
3234 * waiting for it, and we cannot msleep in a timer.
3236 static void e1000e_update_phy_task(struct work_struct *work)
3238 struct e1000_adapter *adapter = container_of(work,
3239 struct e1000_adapter, update_phy_task);
3240 e1000_get_phy_info(&adapter->hw);
3244 * Need to wait a few seconds after link up to get diagnostic information from
3247 static void e1000_update_phy_info(unsigned long data)
3249 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3250 schedule_work(&adapter->update_phy_task);
3254 * e1000e_update_stats - Update the board statistics counters
3255 * @adapter: board private structure
3257 void e1000e_update_stats(struct e1000_adapter *adapter)
3259 struct e1000_hw *hw = &adapter->hw;
3260 struct pci_dev *pdev = adapter->pdev;
3263 * Prevent stats update while adapter is being reset, or if the pci
3264 * connection is down.
3266 if (adapter->link_speed == 0)
3268 if (pci_channel_offline(pdev))
3271 adapter->stats.crcerrs += er32(CRCERRS);
3272 adapter->stats.gprc += er32(GPRC);
3273 adapter->stats.gorc += er32(GORCL);
3274 er32(GORCH); /* Clear gorc */
3275 adapter->stats.bprc += er32(BPRC);
3276 adapter->stats.mprc += er32(MPRC);
3277 adapter->stats.roc += er32(ROC);
3279 adapter->stats.mpc += er32(MPC);
3280 adapter->stats.scc += er32(SCC);
3281 adapter->stats.ecol += er32(ECOL);
3282 adapter->stats.mcc += er32(MCC);
3283 adapter->stats.latecol += er32(LATECOL);
3284 adapter->stats.dc += er32(DC);
3285 adapter->stats.xonrxc += er32(XONRXC);
3286 adapter->stats.xontxc += er32(XONTXC);
3287 adapter->stats.xoffrxc += er32(XOFFRXC);
3288 adapter->stats.xofftxc += er32(XOFFTXC);
3289 adapter->stats.gptc += er32(GPTC);
3290 adapter->stats.gotc += er32(GOTCL);
3291 er32(GOTCH); /* Clear gotc */
3292 adapter->stats.rnbc += er32(RNBC);
3293 adapter->stats.ruc += er32(RUC);
3295 adapter->stats.mptc += er32(MPTC);
3296 adapter->stats.bptc += er32(BPTC);
3298 /* used for adaptive IFS */
3300 hw->mac.tx_packet_delta = er32(TPT);
3301 adapter->stats.tpt += hw->mac.tx_packet_delta;
3302 hw->mac.collision_delta = er32(COLC);
3303 adapter->stats.colc += hw->mac.collision_delta;
3305 adapter->stats.algnerrc += er32(ALGNERRC);
3306 adapter->stats.rxerrc += er32(RXERRC);
3307 if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
3308 adapter->stats.tncrs += er32(TNCRS);
3309 adapter->stats.cexterr += er32(CEXTERR);
3310 adapter->stats.tsctc += er32(TSCTC);
3311 adapter->stats.tsctfc += er32(TSCTFC);
3313 /* Fill out the OS statistics structure */
3314 adapter->net_stats.multicast = adapter->stats.mprc;
3315 adapter->net_stats.collisions = adapter->stats.colc;
3320 * RLEC on some newer hardware can be incorrect so build
3321 * our own version based on RUC and ROC
3323 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3324 adapter->stats.crcerrs + adapter->stats.algnerrc +
3325 adapter->stats.ruc + adapter->stats.roc +
3326 adapter->stats.cexterr;
3327 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3329 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3330 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3331 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3334 adapter->net_stats.tx_errors = adapter->stats.ecol +
3335 adapter->stats.latecol;
3336 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3337 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3338 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3340 /* Tx Dropped needs to be maintained elsewhere */
3342 /* Management Stats */
3343 adapter->stats.mgptc += er32(MGTPTC);
3344 adapter->stats.mgprc += er32(MGTPRC);
3345 adapter->stats.mgpdc += er32(MGTPDC);
3349 * e1000_phy_read_status - Update the PHY register status snapshot
3350 * @adapter: board private structure
3352 static void e1000_phy_read_status(struct e1000_adapter *adapter)
3354 struct e1000_hw *hw = &adapter->hw;
3355 struct e1000_phy_regs *phy = &adapter->phy_regs;
3358 if ((er32(STATUS) & E1000_STATUS_LU) &&
3359 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
3360 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
3361 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
3362 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
3363 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
3364 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
3365 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
3366 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
3367 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
3369 e_warn("Error reading PHY register\n");
3372 * Do not read PHY registers if link is not up
3373 * Set values to typical power-on defaults
3375 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
3376 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
3377 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
3379 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
3380 ADVERTISE_ALL | ADVERTISE_CSMA);
3382 phy->expansion = EXPANSION_ENABLENPAGE;
3383 phy->ctrl1000 = ADVERTISE_1000FULL;
3385 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3389 static void e1000_print_link_info(struct e1000_adapter *adapter)
3391 struct e1000_hw *hw = &adapter->hw;
3392 u32 ctrl = er32(CTRL);
3394 /* Link status message must follow this format for user tools */
3395 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3396 "Flow Control: %s\n",
3397 adapter->netdev->name,
3398 adapter->link_speed,
3399 (adapter->link_duplex == FULL_DUPLEX) ?
3400 "Full Duplex" : "Half Duplex",
3401 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
3403 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3404 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3407 bool e1000_has_link(struct e1000_adapter *adapter)
3409 struct e1000_hw *hw = &adapter->hw;
3410 bool link_active = 0;
3414 * get_link_status is set on LSC (link status) interrupt or
3415 * Rx sequence error interrupt. get_link_status will stay
3416 * true until check_for_link establishes link
3417 * for copper adapters ONLY
3419 switch (hw->phy.media_type) {
3420 case e1000_media_type_copper:
3421 if (hw->mac.get_link_status) {
3422 ret_val = hw->mac.ops.check_for_link(hw);
3423 link_active = !hw->mac.get_link_status;
3428 case e1000_media_type_fiber:
3429 ret_val = hw->mac.ops.check_for_link(hw);
3430 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
3432 case e1000_media_type_internal_serdes:
3433 ret_val = hw->mac.ops.check_for_link(hw);
3434 link_active = adapter->hw.mac.serdes_has_link;
3437 case e1000_media_type_unknown:
3441 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
3442 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
3443 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
3444 e_info("Gigabit has been disabled, downgrading speed\n");
3450 static void e1000e_enable_receives(struct e1000_adapter *adapter)
3452 /* make sure the receive unit is started */
3453 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
3454 (adapter->flags & FLAG_RX_RESTART_NOW)) {
3455 struct e1000_hw *hw = &adapter->hw;
3456 u32 rctl = er32(RCTL);
3457 ew32(RCTL, rctl | E1000_RCTL_EN);
3458 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3463 * e1000_watchdog - Timer Call-back
3464 * @data: pointer to adapter cast into an unsigned long
3466 static void e1000_watchdog(unsigned long data)
3468 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3470 /* Do the rest outside of interrupt context */
3471 schedule_work(&adapter->watchdog_task);
3473 /* TODO: make this use queue_delayed_work() */
3476 static void e1000_watchdog_task(struct work_struct *work)
3478 struct e1000_adapter *adapter = container_of(work,
3479 struct e1000_adapter, watchdog_task);
3480 struct net_device *netdev = adapter->netdev;
3481 struct e1000_mac_info *mac = &adapter->hw.mac;
3482 struct e1000_phy_info *phy = &adapter->hw.phy;
3483 struct e1000_ring *tx_ring = adapter->tx_ring;
3484 struct e1000_hw *hw = &adapter->hw;
3488 link = e1000_has_link(adapter);
3489 if ((netif_carrier_ok(netdev)) && link) {
3490 e1000e_enable_receives(adapter);
3494 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
3495 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
3496 e1000_update_mng_vlan(adapter);
3499 if (!netif_carrier_ok(netdev)) {
3501 /* update snapshot of PHY registers on LSC */
3502 e1000_phy_read_status(adapter);
3503 mac->ops.get_link_up_info(&adapter->hw,
3504 &adapter->link_speed,
3505 &adapter->link_duplex);
3506 e1000_print_link_info(adapter);
3508 * On supported PHYs, check for duplex mismatch only
3509 * if link has autonegotiated at 10/100 half
3511 if ((hw->phy.type == e1000_phy_igp_3 ||
3512 hw->phy.type == e1000_phy_bm) &&
3513 (hw->mac.autoneg == true) &&
3514 (adapter->link_speed == SPEED_10 ||
3515 adapter->link_speed == SPEED_100) &&
3516 (adapter->link_duplex == HALF_DUPLEX)) {
3519 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
3521 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
3522 e_info("Autonegotiated half duplex but"
3523 " link partner cannot autoneg. "
3524 " Try forcing full duplex if "
3525 "link gets many collisions.\n");
3529 * tweak tx_queue_len according to speed/duplex
3530 * and adjust the timeout factor
3532 netdev->tx_queue_len = adapter->tx_queue_len;
3533 adapter->tx_timeout_factor = 1;
3534 switch (adapter->link_speed) {
3537 netdev->tx_queue_len = 10;
3538 adapter->tx_timeout_factor = 16;
3542 netdev->tx_queue_len = 100;
3543 /* maybe add some timeout factor ? */
3548 * workaround: re-program speed mode bit after
3551 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
3554 tarc0 = er32(TARC(0));
3555 tarc0 &= ~SPEED_MODE_BIT;
3556 ew32(TARC(0), tarc0);
3560 * disable TSO for pcie and 10/100 speeds, to avoid
3561 * some hardware issues
3563 if (!(adapter->flags & FLAG_TSO_FORCE)) {
3564 switch (adapter->link_speed) {
3567 e_info("10/100 speed: disabling TSO\n");
3568 netdev->features &= ~NETIF_F_TSO;
3569 netdev->features &= ~NETIF_F_TSO6;
3572 netdev->features |= NETIF_F_TSO;
3573 netdev->features |= NETIF_F_TSO6;
3582 * enable transmits in the hardware, need to do this
3583 * after setting TARC(0)
3586 tctl |= E1000_TCTL_EN;
3590 * Perform any post-link-up configuration before
3591 * reporting link up.
3593 if (phy->ops.cfg_on_link_up)
3594 phy->ops.cfg_on_link_up(hw);
3596 netif_carrier_on(netdev);
3597 netif_tx_wake_all_queues(netdev);
3599 if (!test_bit(__E1000_DOWN, &adapter->state))
3600 mod_timer(&adapter->phy_info_timer,
3601 round_jiffies(jiffies + 2 * HZ));
3604 if (netif_carrier_ok(netdev)) {
3605 adapter->link_speed = 0;
3606 adapter->link_duplex = 0;
3607 /* Link status message must follow this format */
3608 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
3609 adapter->netdev->name);
3610 netif_carrier_off(netdev);
3611 netif_tx_stop_all_queues(netdev);
3612 if (!test_bit(__E1000_DOWN, &adapter->state))
3613 mod_timer(&adapter->phy_info_timer,
3614 round_jiffies(jiffies + 2 * HZ));
3616 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3617 schedule_work(&adapter->reset_task);
3622 e1000e_update_stats(adapter);
3624 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
3625 adapter->tpt_old = adapter->stats.tpt;
3626 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
3627 adapter->colc_old = adapter->stats.colc;
3629 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
3630 adapter->gorc_old = adapter->stats.gorc;
3631 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
3632 adapter->gotc_old = adapter->stats.gotc;
3634 e1000e_update_adaptive(&adapter->hw);
3636 if (!netif_carrier_ok(netdev)) {
3637 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
3641 * We've lost link, so the controller stops DMA,
3642 * but we've got queued Tx work that's never going
3643 * to get done, so reset controller to flush Tx.
3644 * (Do the reset outside of interrupt context).
3646 adapter->tx_timeout_count++;
3647 schedule_work(&adapter->reset_task);
3651 /* Cause software interrupt to ensure Rx ring is cleaned */
3652 if (adapter->msix_entries)
3653 ew32(ICS, adapter->rx_ring->ims_val);
3655 ew32(ICS, E1000_ICS_RXDMT0);
3657 /* Force detection of hung controller every watchdog period */
3658 adapter->detect_tx_hung = 1;
3661 * With 82571 controllers, LAA may be overwritten due to controller
3662 * reset from the other port. Set the appropriate LAA in RAR[0]
3664 if (e1000e_get_laa_state_82571(hw))
3665 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
3667 /* Reset the timer */
3668 if (!test_bit(__E1000_DOWN, &adapter->state))
3669 mod_timer(&adapter->watchdog_timer,
3670 round_jiffies(jiffies + 2 * HZ));
3673 #define E1000_TX_FLAGS_CSUM 0x00000001
3674 #define E1000_TX_FLAGS_VLAN 0x00000002
3675 #define E1000_TX_FLAGS_TSO 0x00000004
3676 #define E1000_TX_FLAGS_IPV4 0x00000008
3677 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
3678 #define E1000_TX_FLAGS_VLAN_SHIFT 16
3680 static int e1000_tso(struct e1000_adapter *adapter,
3681 struct sk_buff *skb)
3683 struct e1000_ring *tx_ring = adapter->tx_ring;
3684 struct e1000_context_desc *context_desc;
3685 struct e1000_buffer *buffer_info;
3688 u16 ipcse = 0, tucse, mss;
3689 u8 ipcss, ipcso, tucss, tucso, hdr_len;
3692 if (skb_is_gso(skb)) {
3693 if (skb_header_cloned(skb)) {
3694 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3699 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3700 mss = skb_shinfo(skb)->gso_size;
3701 if (skb->protocol == htons(ETH_P_IP)) {
3702 struct iphdr *iph = ip_hdr(skb);
3705 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3709 cmd_length = E1000_TXD_CMD_IP;
3710 ipcse = skb_transport_offset(skb) - 1;
3711 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3712 ipv6_hdr(skb)->payload_len = 0;
3713 tcp_hdr(skb)->check =
3714 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3715 &ipv6_hdr(skb)->daddr,
3719 ipcss = skb_network_offset(skb);
3720 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
3721 tucss = skb_transport_offset(skb);
3722 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3725 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
3726 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3728 i = tx_ring->next_to_use;
3729 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3730 buffer_info = &tx_ring->buffer_info[i];
3732 context_desc->lower_setup.ip_fields.ipcss = ipcss;
3733 context_desc->lower_setup.ip_fields.ipcso = ipcso;
3734 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
3735 context_desc->upper_setup.tcp_fields.tucss = tucss;
3736 context_desc->upper_setup.tcp_fields.tucso = tucso;
3737 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
3738 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3739 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
3740 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
3742 buffer_info->time_stamp = jiffies;
3743 buffer_info->next_to_watch = i;
3746 if (i == tx_ring->count)
3748 tx_ring->next_to_use = i;
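/*
 * Note: the context descriptor filled in above describes the headers to the
 * hardware: ipcss/tucss are the offsets where the IP and TCP headers begin,
 * ipcso/tucso locate the checksum fields to be patched per segment, ipcse
 * marks the end of the IP header, and mss/hdr_len let the DMA engine split
 * the payload and replicate the headers for each segment.
 */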
3756 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
3758 struct e1000_ring *tx_ring = adapter->tx_ring;
3759 struct e1000_context_desc *context_desc;
3760 struct e1000_buffer *buffer_info;
3763 u32 cmd_len = E1000_TXD_CMD_DEXT;
3766 if (skb->ip_summed != CHECKSUM_PARTIAL)
3769 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
3770 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
3772 protocol = skb->protocol;
3775 case cpu_to_be16(ETH_P_IP):
3776 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3777 cmd_len |= E1000_TXD_CMD_TCP;
3779 case cpu_to_be16(ETH_P_IPV6):
3780 /* XXX not handling all IPV6 headers */
3781 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3782 cmd_len |= E1000_TXD_CMD_TCP;
3785 if (unlikely(net_ratelimit()))
3786 e_warn("checksum_partial proto=%x!\n",
3787 be16_to_cpu(protocol));
3791 css = skb_transport_offset(skb);
3793 i = tx_ring->next_to_use;
3794 buffer_info = &tx_ring->buffer_info[i];
3795 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3797 context_desc->lower_setup.ip_config = 0;
3798 context_desc->upper_setup.tcp_fields.tucss = css;
3799 context_desc->upper_setup.tcp_fields.tucso =
3800 css + skb->csum_offset;
3801 context_desc->upper_setup.tcp_fields.tucse = 0;
3802 context_desc->tcp_seg_setup.data = 0;
3803 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
3805 buffer_info->time_stamp = jiffies;
3806 buffer_info->next_to_watch = i;
3809 if (i == tx_ring->count)
3811 tx_ring->next_to_use = i;
3816 #define E1000_MAX_PER_TXD 8192
3817 #define E1000_MAX_TXD_PWR 12
3819 static int e1000_tx_map(struct e1000_adapter *adapter,
3820 struct sk_buff *skb, unsigned int first,
3821 unsigned int max_per_txd, unsigned int nr_frags,
3824 struct e1000_ring *tx_ring = adapter->tx_ring;
3825 struct e1000_buffer *buffer_info;
3826 unsigned int len = skb_headlen(skb);
3827 unsigned int offset, size, count = 0, i;
3831 i = tx_ring->next_to_use;
3833 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
3834 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
3835 adapter->tx_dma_failed++;
3839 map = skb_shinfo(skb)->dma_maps;
3843 buffer_info = &tx_ring->buffer_info[i];
3844 size = min(len, max_per_txd);
3846 buffer_info->length = size;
3847 buffer_info->time_stamp = jiffies;
3848 buffer_info->next_to_watch = i;
3849 buffer_info->dma = map[0] + offset;
3857 if (i == tx_ring->count)
3862 for (f = 0; f < nr_frags; f++) {
3863 struct skb_frag_struct *frag;
3865 frag = &skb_shinfo(skb)->frags[f];
3871 if (i == tx_ring->count)
3874 buffer_info = &tx_ring->buffer_info[i];
3875 size = min(len, max_per_txd);
3877 buffer_info->length = size;
3878 buffer_info->time_stamp = jiffies;
3879 buffer_info->next_to_watch = i;
3880 buffer_info->dma = map[f + 1] + offset;
3888 tx_ring->buffer_info[i].skb = skb;
3889 tx_ring->buffer_info[first].next_to_watch = i;
3894 static void e1000_tx_queue(struct e1000_adapter *adapter,
3895 int tx_flags, int count)
3897 struct e1000_ring *tx_ring = adapter->tx_ring;
3898 struct e1000_tx_desc *tx_desc = NULL;
3899 struct e1000_buffer *buffer_info;
3900 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3903 if (tx_flags & E1000_TX_FLAGS_TSO) {
3904 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3906 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3908 if (tx_flags & E1000_TX_FLAGS_IPV4)
3909 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3912 if (tx_flags & E1000_TX_FLAGS_CSUM) {
3913 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3914 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3917 if (tx_flags & E1000_TX_FLAGS_VLAN) {
3918 txd_lower |= E1000_TXD_CMD_VLE;
3919 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3922 i = tx_ring->next_to_use;
3925 buffer_info = &tx_ring->buffer_info[i];
3926 tx_desc = E1000_TX_DESC(*tx_ring, i);
3927 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3928 tx_desc->lower.data =
3929 cpu_to_le32(txd_lower | buffer_info->length);
3930 tx_desc->upper.data = cpu_to_le32(txd_upper);
3933 if (i == tx_ring->count)
3937 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3940 * Force memory writes to complete before letting h/w
3941 * know there are new descriptors to fetch. (Only
3942 * applicable for weak-ordered memory model archs,
3947 tx_ring->next_to_use = i;
3948 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3950 * we need this if more than one processor can write to our tail
3951 * at a time, it synchronizes IO on IA64/Altix systems
3956 #define MINIMUM_DHCP_PACKET_SIZE 282
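/*
 * Note: 282 corresponds to the smallest frame that can hold a DHCP message:
 * 14-byte Ethernet + 20-byte IP + 8-byte UDP headers plus the 240-byte fixed
 * BOOTP portion (236 bytes of fields and the 4-byte options magic cookie).
 */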
3957 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3958 struct sk_buff *skb)
3960 struct e1000_hw *hw = &adapter->hw;
3963 if (vlan_tx_tag_present(skb)) {
3964 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
3965 && (adapter->hw.mng_cookie.status &
3966 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
3970 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
3973 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
3977 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
3980 if (ip->protocol != IPPROTO_UDP)
3983 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
3984 if (ntohs(udp->dest) != 67)
3987 offset = (u8 *)udp + 8 - skb->data;
3988 length = skb->len - offset;
3989 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
3995 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3997 struct e1000_adapter *adapter = netdev_priv(netdev);
3999 netif_stop_queue(netdev);
4001 * Herbert's original patch had:
4002 * smp_mb__after_netif_stop_queue();
4003 * but since that doesn't exist yet, just open code it.
4008 * We need to check again in a case another CPU has just
4009 * made room available.
4011 if (e1000_desc_unused(adapter->tx_ring) < size)
4015 netif_start_queue(netdev);
4016 ++adapter->restart_queue;
4020 static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4022 struct e1000_adapter *adapter = netdev_priv(netdev);
4024 if (e1000_desc_unused(adapter->tx_ring) >= size)
4026 return __e1000_maybe_stop_tx(netdev, size);
4029 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
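/*
 * Note: TXD_USE_COUNT(S, X) estimates how many descriptors an S-byte buffer
 * needs when each descriptor carries at most 2^X bytes, e.g.
 * TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2. The +1 over-counts for
 * exact multiples of 2^X, which only makes the stop-queue check below more
 * conservative.
 */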
4030 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4032 struct e1000_adapter *adapter = netdev_priv(netdev);
4033 struct e1000_ring *tx_ring = adapter->tx_ring;
4035 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4036 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4037 unsigned int tx_flags = 0;
4038 unsigned int len = skb->len - skb->data_len;
4039 unsigned int nr_frags;
4045 if (test_bit(__E1000_DOWN, &adapter->state)) {
4046 dev_kfree_skb_any(skb);
4047 return NETDEV_TX_OK;
4050 if (skb->len <= 0) {
4051 dev_kfree_skb_any(skb);
4052 return NETDEV_TX_OK;
4055 mss = skb_shinfo(skb)->gso_size;
4057 * The controller does a simple calculation to
4058 * make sure there is enough room in the FIFO before
4059 * initiating the DMA for each buffer. The calc is:
4060 * 4 = ceil(buffer len/mss). To make sure we don't
4061 * overrun the FIFO, adjust the max buffer len if mss
4066 max_per_txd = min(mss << 2, max_per_txd);
4067 max_txd_pwr = fls(max_per_txd) - 1;
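/*
 * Example: a standard 1460-byte MSS gives max_per_txd =
 * min(1460 << 2, 8192) = 5840 (four MSS worth of data per descriptor) and
 * max_txd_pwr = fls(5840) - 1 = 12, matching the FIFO headroom calculation
 * described above.
 */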
4070 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4071 * points to just header, pull a few bytes of payload from
4072 * frags into skb->data
4074 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4076 * we do this workaround for ES2LAN, but it is unnecessary;
4077 * avoiding it could save a lot of cycles
4079 if (skb->data_len && (hdr_len == len)) {
4080 unsigned int pull_size;
4082 pull_size = min((unsigned int)4, skb->data_len);
4083 if (!__pskb_pull_tail(skb, pull_size)) {
4084 e_err("__pskb_pull_tail failed.\n");
4085 dev_kfree_skb_any(skb);
4086 return NETDEV_TX_OK;
4088 len = skb->len - skb->data_len;
4092 /* reserve a descriptor for the offload context */
4093 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
4097 count += TXD_USE_COUNT(len, max_txd_pwr);
4099 nr_frags = skb_shinfo(skb)->nr_frags;
4100 for (f = 0; f < nr_frags; f++)
4101 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
4104 if (adapter->hw.mac.tx_pkt_filtering)
4105 e1000_transfer_dhcp_info(adapter, skb);
4108 * need: count + 2 desc gap to keep tail from touching
4109 * head, otherwise try next time
4111 if (e1000_maybe_stop_tx(netdev, count + 2))
4112 return NETDEV_TX_BUSY;
4114 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4115 tx_flags |= E1000_TX_FLAGS_VLAN;
4116 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
4119 first = tx_ring->next_to_use;
4121 tso = e1000_tso(adapter, skb);
4123 dev_kfree_skb_any(skb);
4124 return NETDEV_TX_OK;
4128 tx_flags |= E1000_TX_FLAGS_TSO;
4129 else if (e1000_tx_csum(adapter, skb))
4130 tx_flags |= E1000_TX_FLAGS_CSUM;
4133 * Old method was to assume IPv4 packet by default if TSO was enabled.
4134 * 82571 hardware supports TSO capabilities for IPv6 as well...
4135 * so we can no longer assume IPv4; we must check the protocol.
4137 if (skb->protocol == htons(ETH_P_IP))
4138 tx_flags |= E1000_TX_FLAGS_IPV4;
4140 /* if count is 0 then a mapping error has occurred */
4141 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
4143 e1000_tx_queue(adapter, tx_flags, count);
4144 netdev->trans_start = jiffies;
4145 /* Make sure there is space in the ring for the next send. */
4146 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4149 dev_kfree_skb_any(skb);
4150 tx_ring->buffer_info[first].time_stamp = 0;
4151 tx_ring->next_to_use = first;
4154 return NETDEV_TX_OK;
4158 * e1000_tx_timeout - Respond to a Tx Hang
4159 * @netdev: network interface device structure
4161 static void e1000_tx_timeout(struct net_device *netdev)
4163 struct e1000_adapter *adapter = netdev_priv(netdev);
4165 /* Do the reset outside of interrupt context */
4166 adapter->tx_timeout_count++;
4167 schedule_work(&adapter->reset_task);
4170 static void e1000_reset_task(struct work_struct *work)
4172 struct e1000_adapter *adapter;
4173 adapter = container_of(work, struct e1000_adapter, reset_task);
4175 e1000e_reinit_locked(adapter);
4179 * e1000_get_stats - Get System Network Statistics
4180 * @netdev: network interface device structure
4182 * Returns the address of the device statistics structure.
4183 * The statistics are actually updated from the timer callback.
4185 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
4187 struct e1000_adapter *adapter = netdev_priv(netdev);
4189 /* only return the current stats */
4190 return &adapter->net_stats;
4194 * e1000_change_mtu - Change the Maximum Transfer Unit
4195 * @netdev: network interface device structure
4196 * @new_mtu: new value for maximum frame size
4198 * Returns 0 on success, negative on failure
4200 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4202 struct e1000_adapter *adapter = netdev_priv(netdev);
4203 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4205 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
4206 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
4207 e_err("Invalid MTU setting\n");
4211 /* Jumbo frame size limits */
4212 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
4213 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
4214 e_err("Jumbo Frames not supported.\n");
4217 if (adapter->hw.phy.type == e1000_phy_ife) {
4218 e_err("Jumbo Frames not supported.\n");
4223 #define MAX_STD_JUMBO_FRAME_SIZE 9234
4224 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
4225 e_err("MTU > 9216 not supported.\n");
4229 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4231 /* e1000e_down has a dependency on max_frame_size */
4232 adapter->max_frame_size = max_frame;
4233 if (netif_running(netdev))
4234 e1000e_down(adapter);
4237 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4238 * means we reserve 2 more; this pushes us to allocate from the next larger slab size.
4240 * i.e. RXBUFFER_2048 --> size-4096 slab
4241 * However with the new *_jumbo_rx* routines, jumbo receives will use
4245 if (max_frame <= 256)
4246 adapter->rx_buffer_len = 256;
4247 else if (max_frame <= 512)
4248 adapter->rx_buffer_len = 512;
4249 else if (max_frame <= 1024)
4250 adapter->rx_buffer_len = 1024;
4251 else if (max_frame <= 2048)
4252 adapter->rx_buffer_len = 2048;
4254 adapter->rx_buffer_len = 4096;
4256 /* adjust allocation if LPE protects us, and we aren't using SBP */
4257 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
4258 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
4259 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
4262 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4263 netdev->mtu = new_mtu;
4265 if (netif_running(netdev))
4268 e1000e_reset(adapter);
4270 clear_bit(__E1000_RESETTING, &adapter->state);
4275 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4278 struct e1000_adapter *adapter = netdev_priv(netdev);
4279 struct mii_ioctl_data *data = if_mii(ifr);
4281 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4286 data->phy_id = adapter->hw.phy.addr;
4289 if (!capable(CAP_NET_ADMIN))
4291 switch (data->reg_num & 0x1F) {
4293 data->val_out = adapter->phy_regs.bmcr;
4296 data->val_out = adapter->phy_regs.bmsr;
4299 data->val_out = (adapter->hw.phy.id >> 16);
4302 data->val_out = (adapter->hw.phy.id & 0xFFFF);
4305 data->val_out = adapter->phy_regs.advertise;
4308 data->val_out = adapter->phy_regs.lpa;
4311 data->val_out = adapter->phy_regs.expansion;
4314 data->val_out = adapter->phy_regs.ctrl1000;
4317 data->val_out = adapter->phy_regs.stat1000;
4320 data->val_out = adapter->phy_regs.estatus;
4333 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4339 return e1000_mii_ioctl(netdev, ifr, cmd);
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	e1000_setup_rctl(adapter);
	e1000_set_multi(netdev);

	/* turn on all-multi mode if wake on multicast is enabled */
	if (wufc & E1000_WUFC_MC) {
		rctl |= E1000_RCTL_MPE;
	}

	/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
	/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
	ctrl |= E1000_CTRL_ADVD3WUC |
		E1000_CTRL_EN_PHY_PWR_MGMT;

	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		/* keep the laser running in D3 */
		ctrl_ext = er32(CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
		ew32(CTRL_EXT, ctrl_ext);
	}

	if (adapter->flags & FLAG_IS_ICH)
		e1000e_disable_gig_wol_ich8lan(&adapter->hw);

	/* Allow time for pending master requests to run */
	e1000e_disable_pcie_master(&adapter->hw);

	ew32(WUC, E1000_WUC_PME_EN);
	pci_enable_wake(pdev, PCI_D3hot, 1);
	pci_enable_wake(pdev, PCI_D3cold, 1);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3. To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		pci_set_power_state(pdev, pci_choose_state(pdev, state));

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	return retval;
}
static void e1000e_disable_l1aspm(struct pci_dev *pdev)
{
	int pos;
	u16 val;
	/*
	 * 82573 workaround - disable L1 ASPM on mobile chipsets
	 *
	 * L1 ASPM on various mobile (ich7) chipsets does not behave properly,
	 * resulting in lost data or garbage information on the pci-e link
	 * level. This could result in (false) bad EEPROM checksum errors,
	 * long ping times (up to 2s) or even a system freeze/hang.
	 *
	 * Unfortunately this feature saves about 1W power consumption when
	 * active.
	 */
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
	if (val & 0x2) {
		dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
		val &= ~0x2;
		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
	}
}
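/*
 * Illustrative note (not from the original source): the 0x2 tested and
 * cleared above corresponds to the "L1 Entry Enabled" bit of the PCI
 * Express Link Control register. With a named constant it would read
 * roughly:
 *
 *	#define E1000_LNKCTL_ASPM_L1	0x2	// hypothetical name
 *
 *	if (val & E1000_LNKCTL_ASPM_L1)
 *		val &= ~E1000_LNKCTL_ASPM_L1;
 */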
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	e1000e_disable_l1aspm(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot enable PCI device from suspend\n");
		return err;
	}

	/* AER (Advanced Error Reporting) hooks */
	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
			"0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);
	e1000e_reset(adapter);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	return 0;
}
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	pci_ers_result_t result;

	e1000e_disable_l1aspm(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 pba_num;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GB/s:%s) %pM\n",
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	        "Width x1"),
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_pba_num(hw, &pba_num);
	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	__le16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}

	ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
	if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected ASPM enabled in EEPROM\n");
	}
}
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	e1000e_disable_l1aspm(pdev);

	err = pci_enable_device_mem(pdev);

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					pci_select_bars(pdev, IORESOURCE_MEM),
					e1000e_driver_name);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->features |= NETIF_F_HIGHDMA;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries.
	 */
	if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
	e_err("The NVM Checksum Is Not Valid\n");
	e1000_eeprom_checks(adapter);

	/* copy the MAC address out of the NVM */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
	}
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;
	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);
	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);

	e1000_print_device_info(adapter);

	return 0;
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_release_hw_control(adapter);
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
	free_netdev(netdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
			"pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
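/*
 * Recovery flow, for reference: the PCI error recovery core invokes
 * .error_detected when a bus error affecting the device is reported,
 * .slot_reset once the slot/link has been reset, and .resume when normal
 * traffic may flow again.
 */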
static struct pci_device_id e1000_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
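/*
 * Note, for reference: PCI_VDEVICE(INTEL, id) expands to a match on the
 * Intel vendor ID and the given device ID, with subvendor and subdevice
 * wildcarded, roughly:
 *
 *	.vendor = PCI_VENDOR_ID_INTEL, .device = (id),
 *	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 */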
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init e1000_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
	       e1000e_driver_name, e1000e_driver_version);
	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
	       e1000e_driver_name);
	ret = pci_register_driver(&e1000_driver);
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
			       PM_QOS_DEFAULT_VALUE);

	return ret;
}
module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
}
module_exit(e1000_exit_module);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);