1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.11"
58 #define DRV_MODULE_RELDATE "June 4, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
127 static struct flash_spec flash_table[] =
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202 /* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
222 /* The ring uses 256 indices for 255 entries; one of them
223 * needs to be skipped.
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
231 return (bp->tx_ring_size - diff);
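/* Illustrative arithmetic (not part of the driver): tx_prod and tx_cons
 * are free-running 16-bit indices, so a prod of 0x0005 with a cons of
 * 0xfff0 still reduces to 21 descriptors in flight once the difference
 * is folded back into 16 bits; the clamp above handles the single
 * reserved index per 256-entry page, and the caller is left with
 * tx_ring_size - diff usable slots.
 */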
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
239 spin_lock_bh(&bp->indirect_lock);
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
247 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
249 spin_lock_bh(&bp->indirect_lock);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
252 spin_unlock_bh(&bp->indirect_lock);
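/* Usage sketch (illustrative): shared-memory words such as the firmware
 * mailbox are reached through this register window, e.g.
 *
 *	val = bnx2_reg_rd_ind(bp, bp->shmem_base + BNX2_FW_MB);
 *	bnx2_reg_wr_ind(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
 *
 * The indirect_lock serializes the address/data pair so concurrent
 * callers cannot interleave the two window accesses.
 */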
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
259 spin_lock_bh(&bp->indirect_lock);
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
277 spin_unlock_bh(&bp->indirect_lock);
281 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
301 for (i = 0; i < 50; i++) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
358 for (i = 0; i < 50; i++) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
387 bnx2_disable_int(struct bnx2 *bp)
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
395 bnx2_enable_int(struct bnx2 *bp)
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
408 bnx2_disable_int_sync(struct bnx2 *bp)
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
416 bnx2_netif_stop(struct bnx2 *bp)
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
427 bnx2_netif_start(struct bnx2 *bp)
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
439 bnx2_free_mem(struct bnx2 *bp)
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
451 if (bp->status_blk) {
452 pci_free_consistent(bp->pdev, bp->status_stats_size,
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
455 bp->stats_blk = NULL;
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
473 vfree(bp->rx_buf_ring);
474 bp->rx_buf_ring = NULL;
478 bnx2_alloc_mem(struct bnx2 *bp)
480 int i, status_blk_size;
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
484 if (bp->tx_buf_ring == NULL)
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
496 if (bp->rx_buf_ring == NULL)
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
522 memset(bp->status_blk, 0, bp->status_stats_size);
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
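/* Layout sketch (illustrative): a single coherent buffer holds
 *
 *	[ struct status_block, padded to L1_CACHE_ALIGN ][ struct statistics_block ]
 *
 * so the statistics pointer and its bus address are simply the status
 * block's virtual and DMA addresses advanced by status_blk_size.
 */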
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
549 bnx2_report_fw_link(struct bnx2 *bp)
551 u32 fw_link_status = 0;
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559 switch (bp->line_speed) {
561 if (bp->duplex == DUPLEX_HALF)
562 fw_link_status = BNX2_LINK_STATUS_10HALF;
564 fw_link_status = BNX2_LINK_STATUS_10FULL;
567 if (bp->duplex == DUPLEX_HALF)
568 fw_link_status = BNX2_LINK_STATUS_100HALF;
570 fw_link_status = BNX2_LINK_STATUS_100FULL;
573 if (bp->duplex == DUPLEX_HALF)
574 fw_link_status = BNX2_LINK_STATUS_1000HALF;
576 fw_link_status = BNX2_LINK_STATUS_1000FULL;
579 if (bp->duplex == DUPLEX_HALF)
580 fw_link_status = BNX2_LINK_STATUS_2500HALF;
582 fw_link_status = BNX2_LINK_STATUS_2500FULL;
586 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
589 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
594 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
598 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
602 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
608 bnx2_xceiver_str(struct bnx2 *bp)
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
616 bnx2_report_link(struct bnx2 *bp)
619 netif_carrier_on(bp->dev);
620 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621 bnx2_xceiver_str(bp));
623 printk("%d Mbps ", bp->line_speed);
625 if (bp->duplex == DUPLEX_FULL)
626 printk("full duplex");
628 printk("half duplex");
631 if (bp->flow_ctrl & FLOW_CTRL_RX) {
632 printk(", receive ");
633 if (bp->flow_ctrl & FLOW_CTRL_TX)
634 printk("& transmit ");
637 printk(", transmit ");
639 printk("flow control ON");
644 netif_carrier_off(bp->dev);
645 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646 bnx2_xceiver_str(bp));
649 bnx2_report_fw_link(bp);
653 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
655 u32 local_adv, remote_adv;
658 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
659 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
661 if (bp->duplex == DUPLEX_FULL) {
662 bp->flow_ctrl = bp->req_flow_ctrl;
667 if (bp->duplex != DUPLEX_FULL) {
671 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
672 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
675 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
676 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
677 bp->flow_ctrl |= FLOW_CTRL_TX;
678 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
679 bp->flow_ctrl |= FLOW_CTRL_RX;
683 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
684 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
686 if (bp->phy_flags & PHY_SERDES_FLAG) {
687 u32 new_local_adv = 0;
688 u32 new_remote_adv = 0;
690 if (local_adv & ADVERTISE_1000XPAUSE)
691 new_local_adv |= ADVERTISE_PAUSE_CAP;
692 if (local_adv & ADVERTISE_1000XPSE_ASYM)
693 new_local_adv |= ADVERTISE_PAUSE_ASYM;
694 if (remote_adv & ADVERTISE_1000XPAUSE)
695 new_remote_adv |= ADVERTISE_PAUSE_CAP;
696 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
697 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
699 local_adv = new_local_adv;
700 remote_adv = new_remote_adv;
703 /* See Table 28B-3 of 802.3ab-1999 spec. */
704 if (local_adv & ADVERTISE_PAUSE_CAP) {
705 if(local_adv & ADVERTISE_PAUSE_ASYM) {
706 if (remote_adv & ADVERTISE_PAUSE_CAP) {
707 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
709 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
710 bp->flow_ctrl = FLOW_CTRL_RX;
714 if (remote_adv & ADVERTISE_PAUSE_CAP) {
715 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
719 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
720 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
721 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
723 bp->flow_ctrl = FLOW_CTRL_TX;
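/* Worked example (illustrative) of the 28B-3 resolution above: if we
 * advertise both PAUSE and ASYM while the link partner advertises only
 * ASYM, the resolution yields FLOW_CTRL_RX alone; if both ends
 * advertise plain PAUSE, it yields FLOW_CTRL_TX | FLOW_CTRL_RX.
 */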
729 bnx2_5709s_linkup(struct bnx2 *bp)
735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
736 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
737 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
739 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
740 bp->line_speed = bp->req_line_speed;
741 bp->duplex = bp->req_duplex;
744 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
746 case MII_BNX2_GP_TOP_AN_SPEED_10:
747 bp->line_speed = SPEED_10;
749 case MII_BNX2_GP_TOP_AN_SPEED_100:
750 bp->line_speed = SPEED_100;
752 case MII_BNX2_GP_TOP_AN_SPEED_1G:
753 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
754 bp->line_speed = SPEED_1000;
756 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
757 bp->line_speed = SPEED_2500;
760 if (val & MII_BNX2_GP_TOP_AN_FD)
761 bp->duplex = DUPLEX_FULL;
763 bp->duplex = DUPLEX_HALF;
768 bnx2_5708s_linkup(struct bnx2 *bp)
773 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775 case BCM5708S_1000X_STAT1_SPEED_10:
776 bp->line_speed = SPEED_10;
778 case BCM5708S_1000X_STAT1_SPEED_100:
779 bp->line_speed = SPEED_100;
781 case BCM5708S_1000X_STAT1_SPEED_1G:
782 bp->line_speed = SPEED_1000;
784 case BCM5708S_1000X_STAT1_SPEED_2G5:
785 bp->line_speed = SPEED_2500;
788 if (val & BCM5708S_1000X_STAT1_FD)
789 bp->duplex = DUPLEX_FULL;
791 bp->duplex = DUPLEX_HALF;
797 bnx2_5706s_linkup(struct bnx2 *bp)
799 u32 bmcr, local_adv, remote_adv, common;
802 bp->line_speed = SPEED_1000;
804 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
805 if (bmcr & BMCR_FULLDPLX) {
806 bp->duplex = DUPLEX_FULL;
809 bp->duplex = DUPLEX_HALF;
812 if (!(bmcr & BMCR_ANENABLE)) {
816 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
817 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
819 common = local_adv & remote_adv;
820 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
822 if (common & ADVERTISE_1000XFULL) {
823 bp->duplex = DUPLEX_FULL;
826 bp->duplex = DUPLEX_HALF;
834 bnx2_copper_linkup(struct bnx2 *bp)
838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839 if (bmcr & BMCR_ANENABLE) {
840 u32 local_adv, remote_adv, common;
842 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
845 common = local_adv & (remote_adv >> 2);
846 if (common & ADVERTISE_1000FULL) {
847 bp->line_speed = SPEED_1000;
848 bp->duplex = DUPLEX_FULL;
850 else if (common & ADVERTISE_1000HALF) {
851 bp->line_speed = SPEED_1000;
852 bp->duplex = DUPLEX_HALF;
855 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
858 common = local_adv & remote_adv;
859 if (common & ADVERTISE_100FULL) {
860 bp->line_speed = SPEED_100;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_100HALF) {
864 bp->line_speed = SPEED_100;
865 bp->duplex = DUPLEX_HALF;
867 else if (common & ADVERTISE_10FULL) {
868 bp->line_speed = SPEED_10;
869 bp->duplex = DUPLEX_FULL;
871 else if (common & ADVERTISE_10HALF) {
872 bp->line_speed = SPEED_10;
873 bp->duplex = DUPLEX_HALF;
882 if (bmcr & BMCR_SPEED100) {
883 bp->line_speed = SPEED_100;
886 bp->line_speed = SPEED_10;
888 if (bmcr & BMCR_FULLDPLX) {
889 bp->duplex = DUPLEX_FULL;
892 bp->duplex = DUPLEX_HALF;
900 bnx2_set_mac_link(struct bnx2 *bp)
904 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
905 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
906 (bp->duplex == DUPLEX_HALF)) {
907 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
910 /* Configure the EMAC mode register. */
911 val = REG_RD(bp, BNX2_EMAC_MODE);
913 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
914 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
915 BNX2_EMAC_MODE_25G_MODE);
918 switch (bp->line_speed) {
920 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
921 val |= BNX2_EMAC_MODE_PORT_MII_10M;
926 val |= BNX2_EMAC_MODE_PORT_MII;
929 val |= BNX2_EMAC_MODE_25G_MODE;
932 val |= BNX2_EMAC_MODE_PORT_GMII;
937 val |= BNX2_EMAC_MODE_PORT_GMII;
940 /* Set the MAC to operate in the appropriate duplex mode. */
941 if (bp->duplex == DUPLEX_HALF)
942 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
943 REG_WR(bp, BNX2_EMAC_MODE, val);
945 /* Enable/disable rx PAUSE. */
946 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
948 if (bp->flow_ctrl & FLOW_CTRL_RX)
949 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
950 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
952 /* Enable/disable tx PAUSE. */
953 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
954 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
956 if (bp->flow_ctrl & FLOW_CTRL_TX)
957 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
958 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
960 /* Acknowledge the interrupt. */
961 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
967 bnx2_enable_bmsr1(struct bnx2 *bp)
969 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970 (CHIP_NUM(bp) == CHIP_NUM_5709))
971 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972 MII_BNX2_BLK_ADDR_GP_STATUS);
976 bnx2_disable_bmsr1(struct bnx2 *bp)
978 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979 (CHIP_NUM(bp) == CHIP_NUM_5709))
980 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
985 bnx2_test_and_enable_2g5(struct bnx2 *bp)
990 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
993 if (bp->autoneg & AUTONEG_SPEED)
994 bp->advertising |= ADVERTISED_2500baseX_Full;
996 if (CHIP_NUM(bp) == CHIP_NUM_5709)
997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
999 bnx2_read_phy(bp, bp->mii_up1, &up1);
1000 if (!(up1 & BCM5708S_UP1_2G5)) {
1001 up1 |= BCM5708S_UP1_2G5;
1002 bnx2_write_phy(bp, bp->mii_up1, up1);
1006 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1019 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1022 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1025 bnx2_read_phy(bp, bp->mii_up1, &up1);
1026 if (up1 & BCM5708S_UP1_2G5) {
1027 up1 &= ~BCM5708S_UP1_2G5;
1028 bnx2_write_phy(bp, bp->mii_up1, up1);
1032 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040 bnx2_enable_forced_2g5(struct bnx2 *bp)
1044 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1050 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051 MII_BNX2_BLK_ADDR_SERDES_DIG);
1052 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1061 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1062 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063 bmcr |= BCM5708S_BMCR_FORCE_2500;
1066 if (bp->autoneg & AUTONEG_SPEED) {
1067 bmcr &= ~BMCR_ANENABLE;
1068 if (bp->req_duplex == DUPLEX_FULL)
1069 bmcr |= BMCR_FULLDPLX;
1071 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1075 bnx2_disable_forced_2g5(struct bnx2 *bp)
1079 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1082 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1085 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086 MII_BNX2_BLK_ADDR_SERDES_DIG);
1087 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1100 if (bp->autoneg & AUTONEG_SPEED)
1101 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106 bnx2_set_link(struct bnx2 *bp)
1111 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1116 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1119 link_up = bp->link_up;
1121 bnx2_enable_bmsr1(bp);
1122 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1123 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1124 bnx2_disable_bmsr1(bp);
1126 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1127 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1130 val = REG_RD(bp, BNX2_EMAC_STATUS);
1131 if (val & BNX2_EMAC_STATUS_LINK)
1132 bmsr |= BMSR_LSTATUS;
1134 bmsr &= ~BMSR_LSTATUS;
1137 if (bmsr & BMSR_LSTATUS) {
1140 if (bp->phy_flags & PHY_SERDES_FLAG) {
1141 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1142 bnx2_5706s_linkup(bp);
1143 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1144 bnx2_5708s_linkup(bp);
1145 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1146 bnx2_5709s_linkup(bp);
1149 bnx2_copper_linkup(bp);
1151 bnx2_resolve_flow_ctrl(bp);
1154 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1155 (bp->autoneg & AUTONEG_SPEED))
1156 bnx2_disable_forced_2g5(bp);
1158 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1162 if (bp->link_up != link_up) {
1163 bnx2_report_link(bp);
1166 bnx2_set_mac_link(bp);
1172 bnx2_reset_phy(struct bnx2 *bp)
1177 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1179 #define PHY_RESET_MAX_WAIT 100
1180 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1183 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1184 if (!(reg & BMCR_RESET)) {
1189 if (i == PHY_RESET_MAX_WAIT) {
1196 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1200 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE;
1207 adv = ADVERTISE_PAUSE_CAP;
1210 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212 adv = ADVERTISE_1000XPSE_ASYM;
1215 adv = ADVERTISE_PAUSE_ASYM;
1218 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1223 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1232 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1234 u32 speed_arg = 0, pause_adv;
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1289 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1294 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1295 return (bnx2_setup_remote_phy(bp, port));
1297 if (!(bp->autoneg & AUTONEG_SPEED)) {
1299 int force_link_down = 0;
1301 if (bp->req_line_speed == SPEED_2500) {
1302 if (!bnx2_test_and_enable_2g5(bp))
1303 force_link_down = 1;
1304 } else if (bp->req_line_speed == SPEED_1000) {
1305 if (bnx2_test_and_disable_2g5(bp))
1306 force_link_down = 1;
1308 bnx2_read_phy(bp, bp->mii_adv, &adv);
1309 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1311 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1312 new_bmcr = bmcr & ~BMCR_ANENABLE;
1313 new_bmcr |= BMCR_SPEED1000;
1315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1316 if (bp->req_line_speed == SPEED_2500)
1317 bnx2_enable_forced_2g5(bp);
1318 else if (bp->req_line_speed == SPEED_1000) {
1319 bnx2_disable_forced_2g5(bp);
1320 new_bmcr &= ~0x2000;
1323 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1324 if (bp->req_line_speed == SPEED_2500)
1325 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1327 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1330 if (bp->req_duplex == DUPLEX_FULL) {
1331 adv |= ADVERTISE_1000XFULL;
1332 new_bmcr |= BMCR_FULLDPLX;
1335 adv |= ADVERTISE_1000XHALF;
1336 new_bmcr &= ~BMCR_FULLDPLX;
1338 if ((new_bmcr != bmcr) || (force_link_down)) {
1339 /* Force a link down visible on the other side */
1341 bnx2_write_phy(bp, bp->mii_adv, adv &
1342 ~(ADVERTISE_1000XFULL |
1343 ADVERTISE_1000XHALF));
1344 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1345 BMCR_ANRESTART | BMCR_ANENABLE);
1348 netif_carrier_off(bp->dev);
1349 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1350 bnx2_report_link(bp);
1352 bnx2_write_phy(bp, bp->mii_adv, adv);
1353 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1355 bnx2_resolve_flow_ctrl(bp);
1356 bnx2_set_mac_link(bp);
1361 bnx2_test_and_enable_2g5(bp);
1363 if (bp->advertising & ADVERTISED_1000baseT_Full)
1364 new_adv |= ADVERTISE_1000XFULL;
1366 new_adv |= bnx2_phy_get_pause_adv(bp);
1368 bnx2_read_phy(bp, bp->mii_adv, &adv);
1369 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1371 bp->serdes_an_pending = 0;
1372 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1373 /* Force a link down visible on the other side */
1375 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1376 spin_unlock_bh(&bp->phy_lock);
1378 spin_lock_bh(&bp->phy_lock);
1381 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1382 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1384 /* Speed up link-up time when the link partner
1385 * does not autonegotiate, which is very common
1386 * in blade servers. Some blade servers use
1387 * IPMI for keyboard input, and it's important
1388 * to minimize link disruptions. Autoneg. involves
1389 * exchanging base pages plus 3 next pages and
1390 * normally completes in about 120 msec.
1392 bp->current_interval = SERDES_AN_TIMEOUT;
1393 bp->serdes_an_pending = 1;
1394 mod_timer(&bp->timer, jiffies + bp->current_interval);
1396 bnx2_resolve_flow_ctrl(bp);
1397 bnx2_set_mac_link(bp);
1403 #define ETHTOOL_ALL_FIBRE_SPEED \
1404 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1405 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406 (ADVERTISED_1000baseT_Full)
1408 #define ETHTOOL_ALL_COPPER_SPEED \
1409 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1410 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1411 ADVERTISED_1000baseT_Full)
1413 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1414 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1416 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1419 bnx2_set_default_remote_link(struct bnx2 *bp)
1423 if (bp->phy_port == PORT_TP)
1424 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1426 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1428 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429 bp->req_line_speed = 0;
1430 bp->autoneg |= AUTONEG_SPEED;
1431 bp->advertising = ADVERTISED_Autoneg;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433 bp->advertising |= ADVERTISED_10baseT_Half;
1434 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435 bp->advertising |= ADVERTISED_10baseT_Full;
1436 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437 bp->advertising |= ADVERTISED_100baseT_Half;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439 bp->advertising |= ADVERTISED_100baseT_Full;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441 bp->advertising |= ADVERTISED_1000baseT_Full;
1442 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443 bp->advertising |= ADVERTISED_2500baseX_Full;
1446 bp->advertising = 0;
1447 bp->req_duplex = DUPLEX_FULL;
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449 bp->req_line_speed = SPEED_10;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451 bp->req_duplex = DUPLEX_HALF;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454 bp->req_line_speed = SPEED_100;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459 bp->req_line_speed = SPEED_1000;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461 bp->req_line_speed = SPEED_2500;
1466 bnx2_set_default_link(struct bnx2 *bp)
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1490 bnx2_remote_phy_event(struct bnx2 *bp)
1493 u8 link_up = bp->link_up;
1496 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1498 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1504 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1505 bp->duplex = DUPLEX_FULL;
1507 case BNX2_LINK_STATUS_10HALF:
1508 bp->duplex = DUPLEX_HALF;
1509 case BNX2_LINK_STATUS_10FULL:
1510 bp->line_speed = SPEED_10;
1512 case BNX2_LINK_STATUS_100HALF:
1513 bp->duplex = DUPLEX_HALF;
1514 case BNX2_LINK_STATUS_100BASE_T4:
1515 case BNX2_LINK_STATUS_100FULL:
1516 bp->line_speed = SPEED_100;
1518 case BNX2_LINK_STATUS_1000HALF:
1519 bp->duplex = DUPLEX_HALF;
1520 case BNX2_LINK_STATUS_1000FULL:
1521 bp->line_speed = SPEED_1000;
1523 case BNX2_LINK_STATUS_2500HALF:
1524 bp->duplex = DUPLEX_HALF;
1525 case BNX2_LINK_STATUS_2500FULL:
1526 bp->line_speed = SPEED_2500;
1533 spin_lock(&bp->phy_lock);
1535 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1536 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1537 if (bp->duplex == DUPLEX_FULL)
1538 bp->flow_ctrl = bp->req_flow_ctrl;
1540 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1541 bp->flow_ctrl |= FLOW_CTRL_TX;
1542 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1543 bp->flow_ctrl |= FLOW_CTRL_RX;
1546 old_port = bp->phy_port;
1547 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1548 bp->phy_port = PORT_FIBRE;
1550 bp->phy_port = PORT_TP;
1552 if (old_port != bp->phy_port)
1553 bnx2_set_default_link(bp);
1555 spin_unlock(&bp->phy_lock);
1557 if (bp->link_up != link_up)
1558 bnx2_report_link(bp);
1560 bnx2_set_mac_link(bp);
1564 bnx2_set_remote_link(struct bnx2 *bp)
1568 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1570 case BNX2_FW_EVT_CODE_LINK_EVENT:
1571 bnx2_remote_phy_event(bp);
1573 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1581 bnx2_setup_copper_phy(struct bnx2 *bp)
1586 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1588 if (bp->autoneg & AUTONEG_SPEED) {
1589 u32 adv_reg, adv1000_reg;
1590 u32 new_adv_reg = 0;
1591 u32 new_adv1000_reg = 0;
1593 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1594 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1595 ADVERTISE_PAUSE_ASYM);
1597 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1598 adv1000_reg &= PHY_ALL_1000_SPEED;
1600 if (bp->advertising & ADVERTISED_10baseT_Half)
1601 new_adv_reg |= ADVERTISE_10HALF;
1602 if (bp->advertising & ADVERTISED_10baseT_Full)
1603 new_adv_reg |= ADVERTISE_10FULL;
1604 if (bp->advertising & ADVERTISED_100baseT_Half)
1605 new_adv_reg |= ADVERTISE_100HALF;
1606 if (bp->advertising & ADVERTISED_100baseT_Full)
1607 new_adv_reg |= ADVERTISE_100FULL;
1608 if (bp->advertising & ADVERTISED_1000baseT_Full)
1609 new_adv1000_reg |= ADVERTISE_1000FULL;
1611 new_adv_reg |= ADVERTISE_CSMA;
1613 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1615 if ((adv1000_reg != new_adv1000_reg) ||
1616 (adv_reg != new_adv_reg) ||
1617 ((bmcr & BMCR_ANENABLE) == 0)) {
1619 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1620 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1621 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1624 else if (bp->link_up) {
1625 /* Flow ctrl may have changed from auto to forced */
1626 /* or vice-versa. */
1628 bnx2_resolve_flow_ctrl(bp);
1629 bnx2_set_mac_link(bp);
1635 if (bp->req_line_speed == SPEED_100) {
1636 new_bmcr |= BMCR_SPEED100;
1638 if (bp->req_duplex == DUPLEX_FULL) {
1639 new_bmcr |= BMCR_FULLDPLX;
1641 if (new_bmcr != bmcr) {
1644 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1645 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1647 if (bmsr & BMSR_LSTATUS) {
1648 /* Force link down */
1649 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1650 spin_unlock_bh(&bp->phy_lock);
1652 spin_lock_bh(&bp->phy_lock);
1654 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1655 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1658 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1660 /* Normally, the new speed is setup after the link has
1661 * gone down and up again. In some cases, link will not go
1662 * down so we need to set up the new speed here.
1664 if (bmsr & BMSR_LSTATUS) {
1665 bp->line_speed = bp->req_line_speed;
1666 bp->duplex = bp->req_duplex;
1667 bnx2_resolve_flow_ctrl(bp);
1668 bnx2_set_mac_link(bp);
1671 bnx2_resolve_flow_ctrl(bp);
1672 bnx2_set_mac_link(bp);
1678 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1680 if (bp->loopback == MAC_LOOPBACK)
1683 if (bp->phy_flags & PHY_SERDES_FLAG) {
1684 return (bnx2_setup_serdes_phy(bp, port));
1687 return (bnx2_setup_copper_phy(bp));
1692 bnx2_init_5709s_phy(struct bnx2 *bp)
1696 bp->mii_bmcr = MII_BMCR + 0x10;
1697 bp->mii_bmsr = MII_BMSR + 0x10;
1698 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1699 bp->mii_adv = MII_ADVERTISE + 0x10;
1700 bp->mii_lpa = MII_LPA + 0x10;
1701 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1703 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1704 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1706 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1709 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1711 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1712 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1713 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1714 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1716 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1717 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1718 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1719 val |= BCM5708S_UP1_2G5;
1721 val &= ~BCM5708S_UP1_2G5;
1722 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1725 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1726 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1727 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1729 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1731 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1732 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1733 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1741 bnx2_init_5708s_phy(struct bnx2 *bp)
1747 bp->mii_up1 = BCM5708S_UP1;
1749 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1750 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1751 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1753 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1754 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1755 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1757 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1758 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1759 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1761 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1762 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1763 val |= BCM5708S_UP1_2G5;
1764 bnx2_write_phy(bp, BCM5708S_UP1, val);
1767 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1768 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1769 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1770 /* increase tx signal amplitude */
1771 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1772 BCM5708S_BLK_ADDR_TX_MISC);
1773 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1774 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1775 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1776 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1779 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1780 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1785 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1786 BNX2_SHARED_HW_CFG_CONFIG);
1787 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1788 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1789 BCM5708S_BLK_ADDR_TX_MISC);
1790 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1791 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1792 BCM5708S_BLK_ADDR_DIG);
1799 bnx2_init_5706s_phy(struct bnx2 *bp)
1803 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1805 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1806 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1808 if (bp->dev->mtu > 1500) {
1811 /* Set extended packet length bit */
1812 bnx2_write_phy(bp, 0x18, 0x7);
1813 bnx2_read_phy(bp, 0x18, &val);
1814 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1816 bnx2_write_phy(bp, 0x1c, 0x6c00);
1817 bnx2_read_phy(bp, 0x1c, &val);
1818 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1823 bnx2_write_phy(bp, 0x18, 0x7);
1824 bnx2_read_phy(bp, 0x18, &val);
1825 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1827 bnx2_write_phy(bp, 0x1c, 0x6c00);
1828 bnx2_read_phy(bp, 0x1c, &val);
1829 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1836 bnx2_init_copper_phy(struct bnx2 *bp)
1842 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1843 bnx2_write_phy(bp, 0x18, 0x0c00);
1844 bnx2_write_phy(bp, 0x17, 0x000a);
1845 bnx2_write_phy(bp, 0x15, 0x310b);
1846 bnx2_write_phy(bp, 0x17, 0x201f);
1847 bnx2_write_phy(bp, 0x15, 0x9506);
1848 bnx2_write_phy(bp, 0x17, 0x401f);
1849 bnx2_write_phy(bp, 0x15, 0x14e2);
1850 bnx2_write_phy(bp, 0x18, 0x0400);
1853 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1854 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1855 MII_BNX2_DSP_EXPAND_REG | 0x8);
1856 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1858 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1861 if (bp->dev->mtu > 1500) {
1862 /* Set extended packet length bit */
1863 bnx2_write_phy(bp, 0x18, 0x7);
1864 bnx2_read_phy(bp, 0x18, &val);
1865 bnx2_write_phy(bp, 0x18, val | 0x4000);
1867 bnx2_read_phy(bp, 0x10, &val);
1868 bnx2_write_phy(bp, 0x10, val | 0x1);
1871 bnx2_write_phy(bp, 0x18, 0x7);
1872 bnx2_read_phy(bp, 0x18, &val);
1873 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1875 bnx2_read_phy(bp, 0x10, &val);
1876 bnx2_write_phy(bp, 0x10, val & ~0x1);
1879 /* ethernet@wirespeed */
1880 bnx2_write_phy(bp, 0x18, 0x7007);
1881 bnx2_read_phy(bp, 0x18, &val);
1882 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1888 bnx2_init_phy(struct bnx2 *bp)
1893 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1894 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1896 bp->mii_bmcr = MII_BMCR;
1897 bp->mii_bmsr = MII_BMSR;
1898 bp->mii_bmsr1 = MII_BMSR;
1899 bp->mii_adv = MII_ADVERTISE;
1900 bp->mii_lpa = MII_LPA;
1902 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1904 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1907 bnx2_read_phy(bp, MII_PHYSID1, &val);
1908 bp->phy_id = val << 16;
1909 bnx2_read_phy(bp, MII_PHYSID2, &val);
1910 bp->phy_id |= val & 0xffff;
1912 if (bp->phy_flags & PHY_SERDES_FLAG) {
1913 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1914 rc = bnx2_init_5706s_phy(bp);
1915 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1916 rc = bnx2_init_5708s_phy(bp);
1917 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1918 rc = bnx2_init_5709s_phy(bp);
1921 rc = bnx2_init_copper_phy(bp);
1926 rc = bnx2_setup_phy(bp, bp->phy_port);
1932 bnx2_set_mac_loopback(struct bnx2 *bp)
1936 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1937 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1938 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1939 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1944 static int bnx2_test_link(struct bnx2 *);
1947 bnx2_set_phy_loopback(struct bnx2 *bp)
1952 spin_lock_bh(&bp->phy_lock);
1953 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1955 spin_unlock_bh(&bp->phy_lock);
1959 for (i = 0; i < 10; i++) {
1960 if (bnx2_test_link(bp) == 0)
1965 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1966 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1967 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1968 BNX2_EMAC_MODE_25G_MODE);
1970 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1971 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
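/* Usage sketch (illustrative): bnx2_fw_sync() folds bp->fw_wr_seq into
 * msg_data itself, so callers simply pass the command code, e.g.
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
 *
 * as bnx2_setup_remote_phy() does above, and then check the return
 * value for a firmware timeout or error status.
 */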
1977 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1983 msg_data |= bp->fw_wr_seq;
1985 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1987 /* wait for an acknowledgement. */
1988 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1991 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1993 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1996 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1999 /* If we timed out, inform the firmware that this is the case. */
2000 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2002 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2005 msg_data &= ~BNX2_DRV_MSG_CODE;
2006 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2008 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2013 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2020 bnx2_init_5709_context(struct bnx2 *bp)
2025 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2026 val |= (BCM_PAGE_BITS - 8) << 16;
2027 REG_WR(bp, BNX2_CTX_COMMAND, val);
2028 for (i = 0; i < 10; i++) {
2029 val = REG_RD(bp, BNX2_CTX_COMMAND);
2030 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2034 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2037 for (i = 0; i < bp->ctx_pages; i++) {
2040 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2041 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2042 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2043 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2044 (u64) bp->ctx_blk_mapping[i] >> 32);
2045 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2046 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2047 for (j = 0; j < 10; j++) {
2049 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2050 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2054 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2063 bnx2_init_context(struct bnx2 *bp)
2069 u32 vcid_addr, pcid_addr, offset;
2074 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2077 vcid_addr = GET_PCID_ADDR(vcid);
2079 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2084 pcid_addr = GET_PCID_ADDR(new_vcid);
2087 vcid_addr = GET_CID_ADDR(vcid);
2088 pcid_addr = vcid_addr;
2091 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2092 vcid_addr += (i << PHY_CTX_SHIFT);
2093 pcid_addr += (i << PHY_CTX_SHIFT);
2095 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2096 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2098 /* Zero out the context. */
2099 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2100 CTX_WR(bp, 0x00, offset, 0);
2102 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2103 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2109 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2115 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2116 if (good_mbuf == NULL) {
2117 printk(KERN_ERR PFX "Failed to allocate memory in "
2118 "bnx2_alloc_bad_rbuf\n");
2122 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2123 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2127 /* Allocate a bunch of mbufs and save the good ones in an array. */
2128 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2129 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2130 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2132 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2134 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2136 /* The addresses with Bit 9 set are bad memory blocks. */
2137 if (!(val & (1 << 9))) {
2138 good_mbuf[good_mbuf_cnt] = (u16) val;
2142 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2145 /* Free the good ones back to the mbuf pool thus discarding
2146 * all the bad ones. */
2147 while (good_mbuf_cnt) {
2150 val = good_mbuf[good_mbuf_cnt];
2151 val = (val << 9) | val | 1;
2153 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
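/* Illustrative example of the free encoding above: a saved cluster
 * index of 0x002 is written back as (0x002 << 9) | 0x002 | 1 = 0x403,
 * i.e. the same index in the upper and lower fields with bit 0 set;
 * the loop repeats this for every good index collected earlier.
 */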
2160 bnx2_set_mac_addr(struct bnx2 *bp)
2163 u8 *mac_addr = bp->dev->dev_addr;
2165 val = (mac_addr[0] << 8) | mac_addr[1];
2167 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2169 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2170 (mac_addr[4] << 8) | mac_addr[5];
2172 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2176 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2178 struct sk_buff *skb;
2179 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2181 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2182 unsigned long align;
2184 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2189 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2190 skb_reserve(skb, BNX2_RX_ALIGN - align);
2192 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2193 PCI_DMA_FROMDEVICE);
2196 pci_unmap_addr_set(rx_buf, mapping, mapping);
2198 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2199 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2201 bp->rx_prod_bseq += bp->rx_buf_use_size;
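/* Alignment sketch (illustrative; BNX2_RX_ALIGN is defined elsewhere
 * as a power of two): if, say, BNX2_RX_ALIGN were 8 and skb->data
 * ended in ...0x6, align would be 6 and skb_reserve(skb, 2) would push
 * the data pointer up to the next 8-byte boundary before mapping.
 */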
2207 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2209 struct status_block *sblk = bp->status_blk;
2210 u32 new_link_state, old_link_state;
2213 new_link_state = sblk->status_attn_bits & event;
2214 old_link_state = sblk->status_attn_bits_ack & event;
2215 if (new_link_state != old_link_state) {
2217 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2219 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2227 bnx2_phy_int(struct bnx2 *bp)
2229 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2230 spin_lock(&bp->phy_lock);
2232 spin_unlock(&bp->phy_lock);
2234 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2235 bnx2_set_remote_link(bp);
2240 bnx2_tx_int(struct bnx2 *bp)
2242 struct status_block *sblk = bp->status_blk;
2243 u16 hw_cons, sw_cons, sw_ring_cons;
2246 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2247 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2250 sw_cons = bp->tx_cons;
2252 while (sw_cons != hw_cons) {
2253 struct sw_bd *tx_buf;
2254 struct sk_buff *skb;
2257 sw_ring_cons = TX_RING_IDX(sw_cons);
2259 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2262 /* partial BD completions possible with TSO packets */
2263 if (skb_is_gso(skb)) {
2264 u16 last_idx, last_ring_idx;
2266 last_idx = sw_cons +
2267 skb_shinfo(skb)->nr_frags + 1;
2268 last_ring_idx = sw_ring_cons +
2269 skb_shinfo(skb)->nr_frags + 1;
2270 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2273 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2278 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2279 skb_headlen(skb), PCI_DMA_TODEVICE);
2282 last = skb_shinfo(skb)->nr_frags;
2284 for (i = 0; i < last; i++) {
2285 sw_cons = NEXT_TX_BD(sw_cons);
2287 pci_unmap_page(bp->pdev,
2289 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2291 skb_shinfo(skb)->frags[i].size,
2295 sw_cons = NEXT_TX_BD(sw_cons);
2297 tx_free_bd += last + 1;
2301 hw_cons = bp->hw_tx_cons =
2302 sblk->status_tx_quick_consumer_index0;
2304 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2309 bp->tx_cons = sw_cons;
2310 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2311 * before checking for netif_queue_stopped(). Without the
2312 * memory barrier, there is a small possibility that bnx2_start_xmit()
2313 * will miss it and cause the queue to be stopped forever.
2317 if (unlikely(netif_queue_stopped(bp->dev)) &&
2318 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2319 netif_tx_lock(bp->dev);
2320 if ((netif_queue_stopped(bp->dev)) &&
2321 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2322 netif_wake_queue(bp->dev);
2323 netif_tx_unlock(bp->dev);
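/* Pairing sketch (the transmit path is not shown in this excerpt):
 * bnx2_start_xmit() is expected to stop the queue when the ring fills
 * and then re-check bnx2_tx_avail() against tx_wake_thresh, so the
 * barrier plus the locked re-test above close the window where both
 * sides could see a stale consumer index and leave the queue stopped.
 */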
2328 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2331 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2332 struct rx_bd *cons_bd, *prod_bd;
2334 cons_rx_buf = &bp->rx_buf_ring[cons];
2335 prod_rx_buf = &bp->rx_buf_ring[prod];
2337 pci_dma_sync_single_for_device(bp->pdev,
2338 pci_unmap_addr(cons_rx_buf, mapping),
2339 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2341 bp->rx_prod_bseq += bp->rx_buf_use_size;
2343 prod_rx_buf->skb = skb;
2348 pci_unmap_addr_set(prod_rx_buf, mapping,
2349 pci_unmap_addr(cons_rx_buf, mapping));
2351 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2352 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2353 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2354 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2358 bnx2_rx_int(struct bnx2 *bp, int budget)
2360 struct status_block *sblk = bp->status_blk;
2361 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2362 struct l2_fhdr *rx_hdr;
2365 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2366 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2369 sw_cons = bp->rx_cons;
2370 sw_prod = bp->rx_prod;
2372 /* Memory barrier necessary as speculative reads of the rx
2373 * buffer can be ahead of the index in the status block
2376 while (sw_cons != hw_cons) {
2379 struct sw_bd *rx_buf;
2380 struct sk_buff *skb;
2381 dma_addr_t dma_addr;
2383 sw_ring_cons = RX_RING_IDX(sw_cons);
2384 sw_ring_prod = RX_RING_IDX(sw_prod);
2386 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2391 dma_addr = pci_unmap_addr(rx_buf, mapping);
2393 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2394 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2396 rx_hdr = (struct l2_fhdr *) skb->data;
2397 len = rx_hdr->l2_fhdr_pkt_len - 4;
2399 if ((status = rx_hdr->l2_fhdr_status) &
2400 (L2_FHDR_ERRORS_BAD_CRC |
2401 L2_FHDR_ERRORS_PHY_DECODE |
2402 L2_FHDR_ERRORS_ALIGNMENT |
2403 L2_FHDR_ERRORS_TOO_SHORT |
2404 L2_FHDR_ERRORS_GIANT_FRAME)) {
2409 /* Since we don't have a jumbo ring, copy small packets
2412 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2413 struct sk_buff *new_skb;
2415 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2416 if (new_skb == NULL)
2420 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2421 new_skb->data, len + 2);
2422 skb_reserve(new_skb, 2);
2423 skb_put(new_skb, len);
2425 bnx2_reuse_rx_skb(bp, skb,
2426 sw_ring_cons, sw_ring_prod);
2430 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2431 pci_unmap_single(bp->pdev, dma_addr,
2432 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2434 skb_reserve(skb, bp->rx_offset);
2439 bnx2_reuse_rx_skb(bp, skb,
2440 sw_ring_cons, sw_ring_prod);
2444 skb->protocol = eth_type_trans(skb, bp->dev);
2446 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2447 (ntohs(skb->protocol) != 0x8100)) {
2454 skb->ip_summed = CHECKSUM_NONE;
2456 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2457 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2459 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2460 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2461 skb->ip_summed = CHECKSUM_UNNECESSARY;
2465 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2466 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2467 rx_hdr->l2_fhdr_vlan_tag);
2471 netif_receive_skb(skb);
2473 bp->dev->last_rx = jiffies;
2477 sw_cons = NEXT_RX_BD(sw_cons);
2478 sw_prod = NEXT_RX_BD(sw_prod);
2480 if (rx_pkt == budget)
2483 /* Refresh hw_cons to see if there is new work */
2484 if (sw_cons == hw_cons) {
2485 hw_cons = bp->hw_rx_cons =
2486 sblk->status_rx_quick_consumer_index0;
2487 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2492 bp->rx_cons = sw_cons;
2493 bp->rx_prod = sw_prod;
2495 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2497 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2505 /* MSI ISR - The only difference between this and the INTx ISR
2506 * is that the MSI interrupt is always serviced.
2509 bnx2_msi(int irq, void *dev_instance)
2511 struct net_device *dev = dev_instance;
2512 struct bnx2 *bp = netdev_priv(dev);
2514 prefetch(bp->status_blk);
2515 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2516 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2517 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2519 /* Return here if interrupt is disabled. */
2520 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2523 netif_rx_schedule(dev);
2529 bnx2_msi_1shot(int irq, void *dev_instance)
2531 struct net_device *dev = dev_instance;
2532 struct bnx2 *bp = netdev_priv(dev);
2534 prefetch(bp->status_blk);
2536 /* Return here if interrupt is disabled. */
2537 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2540 netif_rx_schedule(dev);
2546 bnx2_interrupt(int irq, void *dev_instance)
2548 struct net_device *dev = dev_instance;
2549 struct bnx2 *bp = netdev_priv(dev);
2550 struct status_block *sblk = bp->status_blk;
2552 /* When using INTx, it is possible for the interrupt to arrive
2553 * at the CPU before the status block posted prior to the
2554 * interrupt. Reading a register will flush the status block.
2555 * When using MSI, the MSI message will always complete after
2556 * the status block write.
2558 if ((sblk->status_idx == bp->last_status_idx) &&
2559 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2560 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2563 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2564 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2565 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2567 /* Read back to deassert IRQ immediately to avoid too many
2568 * spurious interrupts.
2570 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2572 /* Return here if interrupt is shared and is disabled. */
2573 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2576 if (netif_rx_schedule_prep(dev)) {
2577 bp->last_status_idx = sblk->status_idx;
2578 __netif_rx_schedule(dev);
2584 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2585 STATUS_ATTN_BITS_TIMER_ABORT)
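/* bnx2_has_work() reports whether the status block still shows pending rx or
 * tx completions, or an attention event (link change / timer abort) that has
 * not been acknowledged yet.
 */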
2588 bnx2_has_work(struct bnx2 *bp)
2590 struct status_block *sblk = bp->status_blk;
2592 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2593 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2596 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2597 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2604 bnx2_poll(struct net_device *dev, int *budget)
2606 struct bnx2 *bp = netdev_priv(dev);
2607 struct status_block *sblk = bp->status_blk;
2608 u32 status_attn_bits = sblk->status_attn_bits;
2609 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2611 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2612 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2616 /* This is needed to take care of transient status
2617 * during link changes.
2619 REG_WR(bp, BNX2_HC_COMMAND,
2620 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2621 REG_RD(bp, BNX2_HC_COMMAND);
2624 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2627 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2628 int orig_budget = *budget;
2631 if (orig_budget > dev->quota)
2632 orig_budget = dev->quota;
2634 work_done = bnx2_rx_int(bp, orig_budget);
2635 *budget -= work_done;
2636 dev->quota -= work_done;
2639 bp->last_status_idx = bp->status_blk->status_idx;
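/* Once no work remains, leave polling mode and re-arm the interrupt by
 * writing the last seen status index to INT_ACK_CMD.  MSI needs a single
 * write; for INTx the index is written first with the interrupt still
 * masked and then once more unmasked.
 */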
2642 if (!bnx2_has_work(bp)) {
2643 netif_rx_complete(dev);
2644 if (likely(bp->flags & USING_MSI_FLAG)) {
2645 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2646 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2647 bp->last_status_idx);
2650 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2651 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2652 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2653 bp->last_status_idx);
2655 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2656 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2657 bp->last_status_idx);
2664 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2665 * from set_multicast.
2668 bnx2_set_rx_mode(struct net_device *dev)
2670 struct bnx2 *bp = netdev_priv(dev);
2671 u32 rx_mode, sort_mode;
2674 spin_lock_bh(&bp->phy_lock);
2676 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2677 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2678 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2680 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2681 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2683 if (!(bp->flags & ASF_ENABLE_FLAG))
2684 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2686 if (dev->flags & IFF_PROMISC) {
2687 /* Promiscuous mode. */
2688 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2689 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2690 BNX2_RPM_SORT_USER0_PROM_VLAN;
2692 else if (dev->flags & IFF_ALLMULTI) {
2693 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2694 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2697 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2700 /* Accept one or more multicast(s). */
2701 struct dev_mc_list *mclist;
2702 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2707 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
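/* Hash each multicast address with a little-endian CRC32: the upper bits of
 * the 8-bit hash select one of the eight 32-bit EMAC hash registers and the
 * lower bits select the bit within that register.
 */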
2709 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2710 i++, mclist = mclist->next) {
2712 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2714 regidx = (bit & 0xe0) >> 5;
2716 mc_filter[regidx] |= (1 << bit);
2719 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2720 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2724 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2727 if (rx_mode != bp->rx_mode) {
2728 bp->rx_mode = rx_mode;
2729 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2732 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2733 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2734 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2736 spin_unlock_bh(&bp->phy_lock);
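/* Firmware handling.  The RV2P and CPU firmware images are stored gzip-
 * compressed in the driver; the helpers below allocate a scratch buffer and
 * a zlib stream, inflate each image into the buffer, and load the result
 * into the corresponding on-chip processor.
 */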
2739 #define FW_BUF_SIZE 0x8000
2742 bnx2_gunzip_init(struct bnx2 *bp)
2744 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2747 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2750 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2751 if (bp->strm->workspace == NULL)
2761 vfree(bp->gunzip_buf);
2762 bp->gunzip_buf = NULL;
2765 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2766 "uncompression.\n", bp->dev->name);
2771 bnx2_gunzip_end(struct bnx2 *bp)
2773 kfree(bp->strm->workspace);
2778 if (bp->gunzip_buf) {
2779 vfree(bp->gunzip_buf);
2780 bp->gunzip_buf = NULL;
2785 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2789 /* check gzip header */
2790 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2796 if (zbuf[3] & FNAME)
2797 while ((zbuf[n++] != 0) && (n < len));
2799 bp->strm->next_in = zbuf + n;
2800 bp->strm->avail_in = len - n;
2801 bp->strm->next_out = bp->gunzip_buf;
2802 bp->strm->avail_out = FW_BUF_SIZE;
2804 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2808 rc = zlib_inflate(bp->strm, Z_FINISH);
2810 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2811 *outbuf = bp->gunzip_buf;
2813 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2814 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2815 bp->dev->name, bp->strm->msg);
2817 zlib_inflateEnd(bp->strm);
2819 if (rc == Z_STREAM_END)
2826 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2833 for (i = 0; i < rv2p_code_len; i += 8) {
2834 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2836 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2839 if (rv2p_proc == RV2P_PROC1) {
2840 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2841 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2844 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2845 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2849 /* Reset the processor; it is un-stalled later. */
2850 if (rv2p_proc == RV2P_PROC1) {
2851 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2854 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
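/* load_cpu_fw() is the generic loader for the on-chip RISC processors: halt
 * the CPU, copy the text segment and the data/sbss/bss/rodata sections into
 * its scratchpad through the MIPS view window, point the program counter at
 * the entry address and clear the halt bit.
 */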
2859 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2866 val = REG_RD_IND(bp, cpu_reg->mode);
2867 val |= cpu_reg->mode_value_halt;
2868 REG_WR_IND(bp, cpu_reg->mode, val);
2869 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2871 /* Load the Text area. */
2872 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2877 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2887 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2888 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2892 /* Load the Data area. */
2893 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2897 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2898 REG_WR_IND(bp, offset, fw->data[j]);
2902 /* Load the SBSS area. */
2903 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2907 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2908 REG_WR_IND(bp, offset, fw->sbss[j]);
2912 /* Load the BSS area. */
2913 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2917 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2918 REG_WR_IND(bp, offset, fw->bss[j]);
2922 /* Load the Read-Only area. */
2923 offset = cpu_reg->spad_base +
2924 (fw->rodata_addr - cpu_reg->mips_view_base);
2928 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2929 REG_WR_IND(bp, offset, fw->rodata[j]);
2933 /* Clear the pre-fetch instruction. */
2934 REG_WR_IND(bp, cpu_reg->inst, 0);
2935 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2937 /* Start the CPU. */
2938 val = REG_RD_IND(bp, cpu_reg->mode);
2939 val &= ~cpu_reg->mode_value_halt;
2940 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2941 REG_WR_IND(bp, cpu_reg->mode, val);
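/* bnx2_init_cpus() loads firmware into every internal processor: the two
 * RV2P receive-path engines plus the RXP, TXP, TPAT, COM and CP RISC CPUs,
 * selecting the 5709 or 5706/5708 image set based on the chip generation.
 */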
2947 bnx2_init_cpus(struct bnx2 *bp)
2949 struct cpu_reg cpu_reg;
2955 if ((rc = bnx2_gunzip_init(bp)) != 0)
2958 /* Initialize the RV2P processor. */
2959 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2964 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2966 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2971 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2973 /* Initialize the RX Processor. */
2974 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2975 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2976 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2977 cpu_reg.state = BNX2_RXP_CPU_STATE;
2978 cpu_reg.state_value_clear = 0xffffff;
2979 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2980 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2981 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2982 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2983 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2984 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2985 cpu_reg.mips_view_base = 0x8000000;
2987 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2988 fw = &bnx2_rxp_fw_09;
2990 fw = &bnx2_rxp_fw_06;
2992 rc = load_cpu_fw(bp, &cpu_reg, fw);
2996 /* Initialize the TX Processor. */
2997 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2998 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2999 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3000 cpu_reg.state = BNX2_TXP_CPU_STATE;
3001 cpu_reg.state_value_clear = 0xffffff;
3002 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3003 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3004 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3005 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3006 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3007 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3008 cpu_reg.mips_view_base = 0x8000000;
3010 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3011 fw = &bnx2_txp_fw_09;
3013 fw = &bnx2_txp_fw_06;
3015 rc = load_cpu_fw(bp, &cpu_reg, fw);
3019 /* Initialize the TX Patch-up Processor. */
3020 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3021 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3022 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3023 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3024 cpu_reg.state_value_clear = 0xffffff;
3025 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3026 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3027 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3028 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3029 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3030 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3031 cpu_reg.mips_view_base = 0x8000000;
3033 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3034 fw = &bnx2_tpat_fw_09;
3036 fw = &bnx2_tpat_fw_06;
3038 rc = load_cpu_fw(bp, &cpu_reg, fw);
3042 /* Initialize the Completion Processor. */
3043 cpu_reg.mode = BNX2_COM_CPU_MODE;
3044 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3045 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3046 cpu_reg.state = BNX2_COM_CPU_STATE;
3047 cpu_reg.state_value_clear = 0xffffff;
3048 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3049 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3050 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3051 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3052 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3053 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3054 cpu_reg.mips_view_base = 0x8000000;
3056 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3057 fw = &bnx2_com_fw_09;
3059 fw = &bnx2_com_fw_06;
3061 rc = load_cpu_fw(bp, &cpu_reg, fw);
3065 /* Initialize the Command Processor. */
3066 cpu_reg.mode = BNX2_CP_CPU_MODE;
3067 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3068 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3069 cpu_reg.state = BNX2_CP_CPU_STATE;
3070 cpu_reg.state_value_clear = 0xffffff;
3071 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3072 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3073 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3074 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3075 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3076 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3077 cpu_reg.mips_view_base = 0x8000000;
3079 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3080 fw = &bnx2_cp_fw_09;
3082 rc = load_cpu_fw(bp, &cpu_reg, fw);
3087 bnx2_gunzip_end(bp);
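/* Power management.  bnx2_set_power_state() programs the PCI PM control
 * register for the requested state; on the way down to D3hot it can keep
 * the MAC configured for magic/ACPI packet wake-up and tells the firmware
 * whether Wake-on-LAN is in use.
 */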
3092 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3096 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3102 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3103 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3104 PCI_PM_CTRL_PME_STATUS);
3106 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3107 /* delay required during transition out of D3hot */
3110 val = REG_RD(bp, BNX2_EMAC_MODE);
3111 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3112 val &= ~BNX2_EMAC_MODE_MPKT;
3113 REG_WR(bp, BNX2_EMAC_MODE, val);
3115 val = REG_RD(bp, BNX2_RPM_CONFIG);
3116 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3117 REG_WR(bp, BNX2_RPM_CONFIG, val);
3128 autoneg = bp->autoneg;
3129 advertising = bp->advertising;
3131 bp->autoneg = AUTONEG_SPEED;
3132 bp->advertising = ADVERTISED_10baseT_Half |
3133 ADVERTISED_10baseT_Full |
3134 ADVERTISED_100baseT_Half |
3135 ADVERTISED_100baseT_Full |
3138 bnx2_setup_copper_phy(bp);
3140 bp->autoneg = autoneg;
3141 bp->advertising = advertising;
3143 bnx2_set_mac_addr(bp);
3145 val = REG_RD(bp, BNX2_EMAC_MODE);
3147 /* Enable port mode. */
3148 val &= ~BNX2_EMAC_MODE_PORT;
3149 val |= BNX2_EMAC_MODE_PORT_MII |
3150 BNX2_EMAC_MODE_MPKT_RCVD |
3151 BNX2_EMAC_MODE_ACPI_RCVD |
3152 BNX2_EMAC_MODE_MPKT;
3154 REG_WR(bp, BNX2_EMAC_MODE, val);
3156 /* receive all multicast */
3157 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3158 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3161 REG_WR(bp, BNX2_EMAC_RX_MODE,
3162 BNX2_EMAC_RX_MODE_SORT_MODE);
3164 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3165 BNX2_RPM_SORT_USER0_MC_EN;
3166 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3167 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3168 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3169 BNX2_RPM_SORT_USER0_ENA);
3171 /* Need to enable EMAC and RPM for WOL. */
3172 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3173 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3174 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3175 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3177 val = REG_RD(bp, BNX2_RPM_CONFIG);
3178 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3179 REG_WR(bp, BNX2_RPM_CONFIG, val);
3181 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3184 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3187 if (!(bp->flags & NO_WOL_FLAG))
3188 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3190 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3191 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3192 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3201 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3203 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3206 /* No more memory access after this point until
3207 * device is brought back to D0.
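/* NVRAM access.  The flash/EEPROM behind the NVM block is shared with the
 * management firmware, so software must win the SW arbitration bit (ARB2)
 * and enable NVM access before issuing commands, and release both when done.
 */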
3219 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3224 /* Request access to the flash interface. */
3225 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3226 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3227 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3228 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3234 if (j >= NVRAM_TIMEOUT_COUNT)
3241 bnx2_release_nvram_lock(struct bnx2 *bp)
3246 /* Relinquish nvram interface. */
3247 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3251 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3257 if (j >= NVRAM_TIMEOUT_COUNT)
3265 bnx2_enable_nvram_write(struct bnx2 *bp)
3269 val = REG_RD(bp, BNX2_MISC_CFG);
3270 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3272 if (!bp->flash_info->buffered) {
3275 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3276 REG_WR(bp, BNX2_NVM_COMMAND,
3277 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3279 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3282 val = REG_RD(bp, BNX2_NVM_COMMAND);
3283 if (val & BNX2_NVM_COMMAND_DONE)
3287 if (j >= NVRAM_TIMEOUT_COUNT)
3294 bnx2_disable_nvram_write(struct bnx2 *bp)
3298 val = REG_RD(bp, BNX2_MISC_CFG);
3299 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3304 bnx2_enable_nvram_access(struct bnx2 *bp)
3308 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3309 /* Enable both bits, even on read. */
3310 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3311 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3315 bnx2_disable_nvram_access(struct bnx2 *bp)
3319 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3320 /* Disable both bits, even after read. */
3321 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3322 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3323 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3327 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3332 if (bp->flash_info->buffered)
3333 /* Buffered flash, no erase needed */
3336 /* Build an erase command */
3337 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3338 BNX2_NVM_COMMAND_DOIT;
3340 /* Need to clear DONE bit separately. */
3341 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3343 /* Address of the NVRAM page to erase. */
3344 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3346 /* Issue an erase command. */
3347 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3349 /* Wait for completion. */
3350 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3355 val = REG_RD(bp, BNX2_NVM_COMMAND);
3356 if (val & BNX2_NVM_COMMAND_DONE)
3360 if (j >= NVRAM_TIMEOUT_COUNT)
3367 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3372 /* Build the command word. */
3373 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3375 /* Calculate the offset within a buffered flash. */
3376 if (bp->flash_info->buffered) {
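/* Buffered parts are addressed as (page number << page_bits) plus the byte
 * offset within the page rather than as a flat byte offset.  For
 * illustration only, with a hypothetical 264-byte page and page_bits = 9,
 * linear offset 600 is page 2, remainder 72, i.e. (2 << 9) + 72 = 0x448.
 */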
3377 offset = ((offset / bp->flash_info->page_size) <<
3378 bp->flash_info->page_bits) +
3379 (offset % bp->flash_info->page_size);
3382 /* Need to clear DONE bit separately. */
3383 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3385 /* Address of the NVRAM to read from. */
3386 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3388 /* Issue a read command. */
3389 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3391 /* Wait for completion. */
3392 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3397 val = REG_RD(bp, BNX2_NVM_COMMAND);
3398 if (val & BNX2_NVM_COMMAND_DONE) {
3399 val = REG_RD(bp, BNX2_NVM_READ);
3401 val = be32_to_cpu(val);
3402 memcpy(ret_val, &val, 4);
3406 if (j >= NVRAM_TIMEOUT_COUNT)
3414 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3419 /* Build the command word. */
3420 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3422 /* Calculate the offset within a buffered flash. */
3423 if (bp->flash_info->buffered) {
3424 offset = ((offset / bp->flash_info->page_size) <<
3425 bp->flash_info->page_bits) +
3426 (offset % bp->flash_info->page_size);
3429 /* Need to clear DONE bit separately. */
3430 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3432 memcpy(&val32, val, 4);
3433 val32 = cpu_to_be32(val32);
3435 /* Write the data. */
3436 REG_WR(bp, BNX2_NVM_WRITE, val32);
3438 /* Address of the NVRAM to write to. */
3439 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3441 /* Issue the write command. */
3442 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3444 /* Wait for completion. */
3445 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3448 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3451 if (j >= NVRAM_TIMEOUT_COUNT)
3458 bnx2_init_nvram(struct bnx2 *bp)
3461 int j, entry_count, rc;
3462 struct flash_spec *flash;
3464 /* Determine the selected interface. */
3465 val = REG_RD(bp, BNX2_NVM_CFG1);
3467 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3470 if (val & 0x40000000) {
3472 /* Flash interface has been reconfigured */
3473 for (j = 0, flash = &flash_table[0]; j < entry_count;
3475 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3476 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3477 bp->flash_info = flash;
3484 /* Not yet reconfigured */
3486 if (val & (1 << 23))
3487 mask = FLASH_BACKUP_STRAP_MASK;
3489 mask = FLASH_STRAP_MASK;
3491 for (j = 0, flash = &flash_table[0]; j < entry_count;
3494 if ((val & mask) == (flash->strapping & mask)) {
3495 bp->flash_info = flash;
3497 /* Request access to the flash interface. */
3498 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3501 /* Enable access to flash interface */
3502 bnx2_enable_nvram_access(bp);
3504 /* Reconfigure the flash interface */
3505 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3506 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3507 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3508 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3510 /* Disable access to flash interface */
3511 bnx2_disable_nvram_access(bp);
3512 bnx2_release_nvram_lock(bp);
3517 } /* if (val & 0x40000000) */
3519 if (j == entry_count) {
3520 bp->flash_info = NULL;
3521 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3525 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3526 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3528 bp->flash_size = val;
3530 bp->flash_size = bp->flash_info->total_size;
3536 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3540 u32 cmd_flags, offset32, len32, extra;
3545 /* Request access to the flash interface. */
3546 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3549 /* Enable access to flash interface */
3550 bnx2_enable_nvram_access(bp);
3563 pre_len = 4 - (offset & 3);
3565 if (pre_len >= len32) {
3567 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3568 BNX2_NVM_COMMAND_LAST;
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3574 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3579 memcpy(ret_buf, buf + (offset & 3), pre_len);
3586 extra = 4 - (len32 & 3);
3587 len32 = (len32 + 4) & ~3;
3594 cmd_flags = BNX2_NVM_COMMAND_LAST;
3596 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3597 BNX2_NVM_COMMAND_LAST;
3599 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3601 memcpy(ret_buf, buf, 4 - extra);
3603 else if (len32 > 0) {
3606 /* Read the first word. */
3610 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3612 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3614 /* Advance to the next dword. */
3619 while (len32 > 4 && rc == 0) {
3620 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3622 /* Advance to the next dword. */
3631 cmd_flags = BNX2_NVM_COMMAND_LAST;
3632 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3634 memcpy(ret_buf, buf, 4 - extra);
3637 /* Disable access to flash interface */
3638 bnx2_disable_nvram_access(bp);
3640 bnx2_release_nvram_lock(bp);
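/* bnx2_nvram_write() handles arbitrary byte ranges: the range is first
 * padded out to dword alignment with data read back from the flash, then
 * written one flash page at a time.  Non-buffered parts require reading,
 * erasing and rewriting each affected page; buffered parts are written
 * directly.
 */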
3646 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3649 u32 written, offset32, len32;
3650 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3652 int align_start, align_end;
3657 align_start = align_end = 0;
3659 if ((align_start = (offset32 & 3))) {
3661 len32 += align_start;
3664 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3669 align_end = 4 - (len32 & 3);
3671 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3675 if (align_start || align_end) {
3676 align_buf = kmalloc(len32, GFP_KERNEL);
3677 if (align_buf == NULL)
3680 memcpy(align_buf, start, 4);
3683 memcpy(align_buf + len32 - 4, end, 4);
3685 memcpy(align_buf + align_start, data_buf, buf_size);
3689 if (bp->flash_info->buffered == 0) {
3690 flash_buffer = kmalloc(264, GFP_KERNEL);
3691 if (flash_buffer == NULL) {
3693 goto nvram_write_end;
3698 while ((written < len32) && (rc == 0)) {
3699 u32 page_start, page_end, data_start, data_end;
3700 u32 addr, cmd_flags;
3703 /* Find the page_start addr */
3704 page_start = offset32 + written;
3705 page_start -= (page_start % bp->flash_info->page_size);
3706 /* Find the page_end addr */
3707 page_end = page_start + bp->flash_info->page_size;
3708 /* Find the data_start addr */
3709 data_start = (written == 0) ? offset32 : page_start;
3710 /* Find the data_end addr */
3711 data_end = (page_end > offset32 + len32) ?
3712 (offset32 + len32) : page_end;
3714 /* Request access to the flash interface. */
3715 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3716 goto nvram_write_end;
3718 /* Enable access to flash interface */
3719 bnx2_enable_nvram_access(bp);
3721 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3722 if (bp->flash_info->buffered == 0) {
3725 /* Read the whole page into the buffer
3726 * (non-buffered flash only) */
3727 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3728 if (j == (bp->flash_info->page_size - 4)) {
3729 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3731 rc = bnx2_nvram_read_dword(bp,
3737 goto nvram_write_end;
3743 /* Enable writes to flash interface (unlock write-protect) */
3744 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3745 goto nvram_write_end;
3747 /* Loop to write back the buffer data from page_start to data_start. */
3750 if (bp->flash_info->buffered == 0) {
3751 /* Erase the page */
3752 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3753 goto nvram_write_end;
3755 /* Re-enable the write again for the actual write */
3756 bnx2_enable_nvram_write(bp);
3758 for (addr = page_start; addr < data_start;
3759 addr += 4, i += 4) {
3761 rc = bnx2_nvram_write_dword(bp, addr,
3762 &flash_buffer[i], cmd_flags);
3765 goto nvram_write_end;
3771 /* Loop to write the new data from data_start to data_end */
3772 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3773 if ((addr == page_end - 4) ||
3774 ((bp->flash_info->buffered) &&
3775 (addr == data_end - 4))) {
3777 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3779 rc = bnx2_nvram_write_dword(bp, addr, buf,
3783 goto nvram_write_end;
3789 /* Loop to write back the buffer data from data_end to page_end. */
3791 if (bp->flash_info->buffered == 0) {
3792 for (addr = data_end; addr < page_end;
3793 addr += 4, i += 4) {
3795 if (addr == page_end-4) {
3796 cmd_flags = BNX2_NVM_COMMAND_LAST;
3798 rc = bnx2_nvram_write_dword(bp, addr,
3799 &flash_buffer[i], cmd_flags);
3802 goto nvram_write_end;
3808 /* Disable writes to flash interface (lock write-protect) */
3809 bnx2_disable_nvram_write(bp);
3811 /* Disable access to flash interface */
3812 bnx2_disable_nvram_access(bp);
3813 bnx2_release_nvram_lock(bp);
3815 /* Increment written */
3816 written += data_end - data_start;
3820 kfree(flash_buffer);
3826 bnx2_init_remote_phy(struct bnx2 *bp)
3830 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3831 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3834 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3835 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3838 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3839 if (netif_running(bp->dev)) {
3840 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3841 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3842 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3845 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3847 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3848 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3849 bp->phy_port = PORT_FIBRE;
3851 bp->phy_port = PORT_TP;
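/* bnx2_reset_chip() performs the core reset handshake: quiesce DMA, wait for
 * the bootcode to allow the reset, write the driver reset signature, assert
 * the soft reset (MISC_COMMAND on 5709, PCICFG_MISC_CONFIG on older chips),
 * wait for the reset bits to clear and verify the byte-swap configuration.
 */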
3856 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3861 /* Wait for the current PCI transaction to complete before
3862 * issuing a reset. */
3863 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3864 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3865 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3866 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3867 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3868 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3871 /* Wait for the firmware to tell us it is ok to issue a reset. */
3872 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3874 /* Deposit a driver reset signature so the firmware knows that
3875 * this is a soft reset. */
3876 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3877 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3879 /* Do a dummy read to force the chip to complete all current transactions
3880 * before we issue a reset. */
3881 val = REG_RD(bp, BNX2_MISC_ID);
3883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3884 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3885 REG_RD(bp, BNX2_MISC_COMMAND);
3888 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3889 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3891 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3894 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3895 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3896 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3899 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3901 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3902 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3903 current->state = TASK_UNINTERRUPTIBLE;
3904 schedule_timeout(HZ / 50);
3907 /* Reset takes approximately 30 usec */
3908 for (i = 0; i < 10; i++) {
3909 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3910 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3911 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3916 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3917 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3918 printk(KERN_ERR PFX "Chip reset did not complete\n");
3923 /* Make sure byte swapping is properly configured. */
3924 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3925 if (val != 0x01020304) {
3926 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3930 /* Wait for the firmware to finish its initialization. */
3931 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3935 spin_lock_bh(&bp->phy_lock);
3936 bnx2_init_remote_phy(bp);
3937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3938 bnx2_set_default_remote_link(bp);
3939 spin_unlock_bh(&bp->phy_lock);
3941 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3942 /* Adjust the voltage regulator down two steps. The default
3943 * of this register is 0x0000000e. */
3944 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3946 /* Remove bad rbuf memory from the free pool. */
3947 rc = bnx2_alloc_bad_rbuf(bp);
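/* bnx2_init_chip() reprograms the device after a reset: DMA byte/word
 * swapping, context memory, CPU firmware, NVRAM detection, MAC address and
 * MTU, the host status/statistics block addresses and the host coalescing
 * parameters, and finally tells the firmware that initialization is done.
 */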
3954 bnx2_init_chip(struct bnx2 *bp)
3959 /* Make sure the interrupt is not active. */
3960 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3962 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3963 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3965 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3967 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3968 DMA_READ_CHANS << 12 |
3969 DMA_WRITE_CHANS << 16;
3971 val |= (0x2 << 20) | (1 << 11);
3973 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3976 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3977 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3978 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3980 REG_WR(bp, BNX2_DMA_CONFIG, val);
3982 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3983 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3984 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3985 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3988 if (bp->flags & PCIX_FLAG) {
3991 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3993 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3994 val16 & ~PCI_X_CMD_ERO);
3997 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3998 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3999 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4000 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4002 /* Initialize context mapping and zero out the quick contexts. The
4003 * context block must have already been enabled. */
4004 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4005 rc = bnx2_init_5709_context(bp);
4009 bnx2_init_context(bp);
4011 if ((rc = bnx2_init_cpus(bp)) != 0)
4014 bnx2_init_nvram(bp);
4016 bnx2_set_mac_addr(bp);
4018 val = REG_RD(bp, BNX2_MQ_CONFIG);
4019 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4020 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4021 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4022 val |= BNX2_MQ_CONFIG_HALT_DIS;
4024 REG_WR(bp, BNX2_MQ_CONFIG, val);
4026 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4027 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4028 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4030 val = (BCM_PAGE_BITS - 8) << 24;
4031 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4033 /* Configure page size. */
4034 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4035 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4036 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4037 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4039 val = bp->mac_addr[0] +
4040 (bp->mac_addr[1] << 8) +
4041 (bp->mac_addr[2] << 16) +
4043 (bp->mac_addr[4] << 8) +
4044 (bp->mac_addr[5] << 16);
4045 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4047 /* Program the MTU. Also include 4 bytes for CRC32. */
4048 val = bp->dev->mtu + ETH_HLEN + 4;
4049 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4050 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4051 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4053 bp->last_status_idx = 0;
4054 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4056 /* Set up how to generate a link change interrupt. */
4057 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4059 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4060 (u64) bp->status_blk_mapping & 0xffffffff);
4061 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4063 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4064 (u64) bp->stats_blk_mapping & 0xffffffff);
4065 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4066 (u64) bp->stats_blk_mapping >> 32);
4068 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4069 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4071 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4072 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4074 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4075 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4077 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4079 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4081 REG_WR(bp, BNX2_HC_COM_TICKS,
4082 (bp->com_ticks_int << 16) | bp->com_ticks);
4084 REG_WR(bp, BNX2_HC_CMD_TICKS,
4085 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4087 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4088 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4090 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
4091 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4093 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4094 val = BNX2_HC_CONFIG_COLLECT_STATS;
4096 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4097 BNX2_HC_CONFIG_COLLECT_STATS;
4100 if (bp->flags & ONE_SHOT_MSI_FLAG)
4101 val |= BNX2_HC_CONFIG_ONE_SHOT;
4103 REG_WR(bp, BNX2_HC_CONFIG, val);
4105 /* Clear internal stats counters. */
4106 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4108 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4110 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4111 BNX2_PORT_FEATURE_ASF_ENABLED)
4112 bp->flags |= ASF_ENABLE_FLAG;
4114 /* Initialize the receive filter. */
4115 bnx2_set_rx_mode(bp->dev);
4117 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4118 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4119 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4120 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4122 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4125 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
4126 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4130 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
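/* Write the L2 tx context for the given CID: the BD chain type, the command
 * type and the 64-bit host address of the tx descriptor ring.  The 5709 uses
 * a different (XI) register layout for the same fields.
 */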
4136 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4138 u32 val, offset0, offset1, offset2, offset3;
4140 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4141 offset0 = BNX2_L2CTX_TYPE_XI;
4142 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4143 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4144 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4146 offset0 = BNX2_L2CTX_TYPE;
4147 offset1 = BNX2_L2CTX_CMD_TYPE;
4148 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4149 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4151 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4152 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4154 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4155 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4157 val = (u64) bp->tx_desc_mapping >> 32;
4158 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4160 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4161 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4165 bnx2_init_tx_ring(struct bnx2 *bp)
4170 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4172 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4174 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4175 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4180 bp->tx_prod_bseq = 0;
4183 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4184 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4186 bnx2_init_tx_context(bp, cid);
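/* Set up the rx ring: size each buffer for the MTU plus header, CRC and VLAN
 * overhead, chain the ring pages together, write the ring context,
 * pre-allocate an skb for every slot and publish the initial producer index.
 */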
4190 bnx2_init_rx_ring(struct bnx2 *bp)
4194 u16 prod, ring_prod;
4197 /* 8 for CRC and VLAN */
4198 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4200 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4202 ring_prod = prod = bp->rx_prod = 0;
4205 bp->rx_prod_bseq = 0;
4207 for (i = 0; i < bp->rx_max_ring; i++) {
4210 rxbd = &bp->rx_desc_ring[i][0];
4211 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4212 rxbd->rx_bd_len = bp->rx_buf_use_size;
4213 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4215 if (i == (bp->rx_max_ring - 1))
4219 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4220 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4224 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4225 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4227 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4229 val = (u64) bp->rx_desc_mapping[0] >> 32;
4230 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4232 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4233 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4235 for (i = 0; i < bp->rx_ring_size; i++) {
4236 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4239 prod = NEXT_RX_BD(prod);
4240 ring_prod = RX_RING_IDX(prod);
4244 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4246 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4250 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4254 bp->rx_ring_size = size;
4256 while (size > MAX_RX_DESC_CNT) {
4257 size -= MAX_RX_DESC_CNT;
4260 /* round to next power of 2 */
4262 while ((max & num_rings) == 0)
4265 if (num_rings != max)
4268 bp->rx_max_ring = max;
4269 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4273 bnx2_free_tx_skbs(struct bnx2 *bp)
4277 if (bp->tx_buf_ring == NULL)
4280 for (i = 0; i < TX_DESC_CNT; ) {
4281 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4282 struct sk_buff *skb = tx_buf->skb;
4290 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4291 skb_headlen(skb), PCI_DMA_TODEVICE);
4295 last = skb_shinfo(skb)->nr_frags;
4296 for (j = 0; j < last; j++) {
4297 tx_buf = &bp->tx_buf_ring[i + j + 1];
4298 pci_unmap_page(bp->pdev,
4299 pci_unmap_addr(tx_buf, mapping),
4300 skb_shinfo(skb)->frags[j].size,
4310 bnx2_free_rx_skbs(struct bnx2 *bp)
4314 if (bp->rx_buf_ring == NULL)
4317 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4318 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4319 struct sk_buff *skb = rx_buf->skb;
4324 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4325 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4334 bnx2_free_skbs(struct bnx2 *bp)
4336 bnx2_free_tx_skbs(bp);
4337 bnx2_free_rx_skbs(bp);
4341 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4345 rc = bnx2_reset_chip(bp, reset_code);
4350 if ((rc = bnx2_init_chip(bp)) != 0)
4353 bnx2_init_tx_ring(bp);
4354 bnx2_init_rx_ring(bp);
4359 bnx2_init_nic(struct bnx2 *bp)
4363 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4366 spin_lock_bh(&bp->phy_lock);
4369 spin_unlock_bh(&bp->phy_lock);
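/* ethtool self-test helpers.  The register test writes 0 and 0xffffffff to
 * each entry in the table below and checks that writable bits respond and
 * read-only bits keep their saved value; entries flagged BNX2_FL_NOT_5709
 * are skipped on the 5709.
 */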
4374 bnx2_test_registers(struct bnx2 *bp)
4378 static const struct {
4381 #define BNX2_FL_NOT_5709 1
4385 { 0x006c, 0, 0x00000000, 0x0000003f },
4386 { 0x0090, 0, 0xffffffff, 0x00000000 },
4387 { 0x0094, 0, 0x00000000, 0x00000000 },
4389 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4390 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4391 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4392 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4393 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4394 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4395 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4396 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4397 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4399 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4400 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4401 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4402 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4403 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4404 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4406 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4407 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4408 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4410 { 0x1000, 0, 0x00000000, 0x00000001 },
4411 { 0x1004, 0, 0x00000000, 0x000f0001 },
4413 { 0x1408, 0, 0x01c00800, 0x00000000 },
4414 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4415 { 0x14a8, 0, 0x00000000, 0x000001ff },
4416 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4417 { 0x14b0, 0, 0x00000002, 0x00000001 },
4418 { 0x14b8, 0, 0x00000000, 0x00000000 },
4419 { 0x14c0, 0, 0x00000000, 0x00000009 },
4420 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4421 { 0x14cc, 0, 0x00000000, 0x00000001 },
4422 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4424 { 0x1800, 0, 0x00000000, 0x00000001 },
4425 { 0x1804, 0, 0x00000000, 0x00000003 },
4427 { 0x2800, 0, 0x00000000, 0x00000001 },
4428 { 0x2804, 0, 0x00000000, 0x00003f01 },
4429 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4430 { 0x2810, 0, 0xffff0000, 0x00000000 },
4431 { 0x2814, 0, 0xffff0000, 0x00000000 },
4432 { 0x2818, 0, 0xffff0000, 0x00000000 },
4433 { 0x281c, 0, 0xffff0000, 0x00000000 },
4434 { 0x2834, 0, 0xffffffff, 0x00000000 },
4435 { 0x2840, 0, 0x00000000, 0xffffffff },
4436 { 0x2844, 0, 0x00000000, 0xffffffff },
4437 { 0x2848, 0, 0xffffffff, 0x00000000 },
4438 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4440 { 0x2c00, 0, 0x00000000, 0x00000011 },
4441 { 0x2c04, 0, 0x00000000, 0x00030007 },
4443 { 0x3c00, 0, 0x00000000, 0x00000001 },
4444 { 0x3c04, 0, 0x00000000, 0x00070000 },
4445 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4446 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4447 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4448 { 0x3c14, 0, 0x00000000, 0xffffffff },
4449 { 0x3c18, 0, 0x00000000, 0xffffffff },
4450 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4451 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4453 { 0x5004, 0, 0x00000000, 0x0000007f },
4454 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4456 { 0x5c00, 0, 0x00000000, 0x00000001 },
4457 { 0x5c04, 0, 0x00000000, 0x0003000f },
4458 { 0x5c08, 0, 0x00000003, 0x00000000 },
4459 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4460 { 0x5c10, 0, 0x00000000, 0xffffffff },
4461 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4462 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4463 { 0x5c88, 0, 0x00000000, 0x00077373 },
4464 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4466 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4467 { 0x680c, 0, 0xffffffff, 0x00000000 },
4468 { 0x6810, 0, 0xffffffff, 0x00000000 },
4469 { 0x6814, 0, 0xffffffff, 0x00000000 },
4470 { 0x6818, 0, 0xffffffff, 0x00000000 },
4471 { 0x681c, 0, 0xffffffff, 0x00000000 },
4472 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4473 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4474 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4475 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4476 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4477 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4478 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4479 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4480 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4481 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4482 { 0x684c, 0, 0xffffffff, 0x00000000 },
4483 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4484 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4485 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4486 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4487 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4488 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4490 { 0xffff, 0, 0x00000000, 0x00000000 },
4495 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4498 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4499 u32 offset, rw_mask, ro_mask, save_val, val;
4500 u16 flags = reg_tbl[i].flags;
4502 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4505 offset = (u32) reg_tbl[i].offset;
4506 rw_mask = reg_tbl[i].rw_mask;
4507 ro_mask = reg_tbl[i].ro_mask;
4509 save_val = readl(bp->regview + offset);
4511 writel(0, bp->regview + offset);
4513 val = readl(bp->regview + offset);
4514 if ((val & rw_mask) != 0) {
4518 if ((val & ro_mask) != (save_val & ro_mask)) {
4522 writel(0xffffffff, bp->regview + offset);
4524 val = readl(bp->regview + offset);
4525 if ((val & rw_mask) != rw_mask) {
4529 if ((val & ro_mask) != (save_val & ro_mask)) {
4533 writel(save_val, bp->regview + offset);
4537 writel(save_val, bp->regview + offset);
4545 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4547 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4548 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4551 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4554 for (offset = 0; offset < size; offset += 4) {
4556 REG_WR_IND(bp, start + offset, test_pattern[i]);
4558 if (REG_RD_IND(bp, start + offset) !=
4568 bnx2_test_memory(struct bnx2 *bp)
4572 static struct mem_entry {
4575 } mem_tbl_5706[] = {
4576 { 0x60000, 0x4000 },
4577 { 0xa0000, 0x3000 },
4578 { 0xe0000, 0x4000 },
4579 { 0x120000, 0x4000 },
4580 { 0x1a0000, 0x4000 },
4581 { 0x160000, 0x4000 },
4585 { 0x60000, 0x4000 },
4586 { 0xa0000, 0x3000 },
4587 { 0xe0000, 0x4000 },
4588 { 0x120000, 0x4000 },
4589 { 0x1a0000, 0x4000 },
4592 struct mem_entry *mem_tbl;
4594 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4595 mem_tbl = mem_tbl_5709;
4597 mem_tbl = mem_tbl_5706;
4599 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4600 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4601 mem_tbl[i].len)) != 0) {
4609 #define BNX2_MAC_LOOPBACK 0
4610 #define BNX2_PHY_LOOPBACK 1
4613 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4615 unsigned int pkt_size, num_pkts, i;
4616 struct sk_buff *skb, *rx_skb;
4617 unsigned char *packet;
4618 u16 rx_start_idx, rx_idx;
4621 struct sw_bd *rx_buf;
4622 struct l2_fhdr *rx_hdr;
4625 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4626 bp->loopback = MAC_LOOPBACK;
4627 bnx2_set_mac_loopback(bp);
4629 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4630 bp->loopback = PHY_LOOPBACK;
4631 bnx2_set_phy_loopback(bp);
4637 skb = netdev_alloc_skb(bp->dev, pkt_size);
4640 packet = skb_put(skb, pkt_size);
4641 memcpy(packet, bp->dev->dev_addr, 6);
4642 memset(packet + 6, 0x0, 8);
4643 for (i = 14; i < pkt_size; i++)
4644 packet[i] = (unsigned char) (i & 0xff);
4646 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4649 REG_WR(bp, BNX2_HC_COMMAND,
4650 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4652 REG_RD(bp, BNX2_HC_COMMAND);
4655 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4659 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4661 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4662 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4663 txbd->tx_bd_mss_nbytes = pkt_size;
4664 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4667 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4668 bp->tx_prod_bseq += pkt_size;
4670 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4671 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4675 REG_WR(bp, BNX2_HC_COMMAND,
4676 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4678 REG_RD(bp, BNX2_HC_COMMAND);
4682 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4685 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4686 goto loopback_test_done;
4689 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4690 if (rx_idx != rx_start_idx + num_pkts) {
4691 goto loopback_test_done;
4694 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4695 rx_skb = rx_buf->skb;
4697 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4698 skb_reserve(rx_skb, bp->rx_offset);
4700 pci_dma_sync_single_for_cpu(bp->pdev,
4701 pci_unmap_addr(rx_buf, mapping),
4702 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4704 if (rx_hdr->l2_fhdr_status &
4705 (L2_FHDR_ERRORS_BAD_CRC |
4706 L2_FHDR_ERRORS_PHY_DECODE |
4707 L2_FHDR_ERRORS_ALIGNMENT |
4708 L2_FHDR_ERRORS_TOO_SHORT |
4709 L2_FHDR_ERRORS_GIANT_FRAME)) {
4711 goto loopback_test_done;
4714 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4715 goto loopback_test_done;
4718 for (i = 14; i < pkt_size; i++) {
4719 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4720 goto loopback_test_done;
4731 #define BNX2_MAC_LOOPBACK_FAILED 1
4732 #define BNX2_PHY_LOOPBACK_FAILED 2
4733 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4734 BNX2_PHY_LOOPBACK_FAILED)
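/* Loopback test: reset the NIC, then send one packet through MAC loopback
 * and one through PHY loopback and check that each comes back on the rx
 * ring undamaged.
 */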
4737 bnx2_test_loopback(struct bnx2 *bp)
4741 if (!netif_running(bp->dev))
4742 return BNX2_LOOPBACK_FAILED;
4744 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4745 spin_lock_bh(&bp->phy_lock);
4747 spin_unlock_bh(&bp->phy_lock);
4748 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4749 rc |= BNX2_MAC_LOOPBACK_FAILED;
4750 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4751 rc |= BNX2_PHY_LOOPBACK_FAILED;
4755 #define NVRAM_SIZE 0x200
4756 #define CRC32_RESIDUAL 0xdebb20e3
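/* NVRAM test: verify the 0x669955aa magic at offset 0, then check that the
 * CRC32 over each 0x100-byte half of the manufacturing block at offset 0x100
 * leaves the standard residual value.
 */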
4759 bnx2_test_nvram(struct bnx2 *bp)
4761 u32 buf[NVRAM_SIZE / 4];
4762 u8 *data = (u8 *) buf;
4766 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4767 goto test_nvram_done;
4769 magic = be32_to_cpu(buf[0]);
4770 if (magic != 0x669955aa) {
4772 goto test_nvram_done;
4775 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4776 goto test_nvram_done;
4778 csum = ether_crc_le(0x100, data);
4779 if (csum != CRC32_RESIDUAL) {
4781 goto test_nvram_done;
4784 csum = ether_crc_le(0x100, data + 0x100);
4785 if (csum != CRC32_RESIDUAL) {
4794 bnx2_test_link(struct bnx2 *bp)
4798 spin_lock_bh(&bp->phy_lock);
4799 bnx2_enable_bmsr1(bp);
4800 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4801 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4802 bnx2_disable_bmsr1(bp);
4803 spin_unlock_bh(&bp->phy_lock);
4805 if (bmsr & BMSR_LSTATUS) {
4812 bnx2_test_intr(struct bnx2 *bp)
4817 if (!netif_running(bp->dev))
4820 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4822 /* This register is not touched during run-time. */
4823 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4824 REG_RD(bp, BNX2_HC_COMMAND);
4826 for (i = 0; i < 10; i++) {
4827 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4833 msleep_interruptible(10);
4842 bnx2_5706_serdes_timer(struct bnx2 *bp)
4844 spin_lock(&bp->phy_lock);
4845 if (bp->serdes_an_pending)
4846 bp->serdes_an_pending--;
4847 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4850 bp->current_interval = bp->timer_interval;
4852 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4854 if (bmcr & BMCR_ANENABLE) {
4857 bnx2_write_phy(bp, 0x1c, 0x7c00);
4858 bnx2_read_phy(bp, 0x1c, &phy1);
4860 bnx2_write_phy(bp, 0x17, 0x0f01);
4861 bnx2_read_phy(bp, 0x15, &phy2);
4862 bnx2_write_phy(bp, 0x17, 0x0f01);
4863 bnx2_read_phy(bp, 0x15, &phy2);
4865 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4866 !(phy2 & 0x20)) { /* no CONFIG */
4868 bmcr &= ~BMCR_ANENABLE;
4869 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4870 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4871 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4875 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4876 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4879 bnx2_write_phy(bp, 0x17, 0x0f01);
4880 bnx2_read_phy(bp, 0x15, &phy2);
4884 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4885 bmcr |= BMCR_ANENABLE;
4886 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4888 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4891 bp->current_interval = bp->timer_interval;
4893 spin_unlock(&bp->phy_lock);
4897 bnx2_5708_serdes_timer(struct bnx2 *bp)
4899 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4902 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4903 bp->serdes_an_pending = 0;
4907 spin_lock(&bp->phy_lock);
4908 if (bp->serdes_an_pending)
4909 bp->serdes_an_pending--;
4910 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4913 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4914 if (bmcr & BMCR_ANENABLE) {
4915 bnx2_enable_forced_2g5(bp);
4916 bp->current_interval = SERDES_FORCED_TIMEOUT;
4918 bnx2_disable_forced_2g5(bp);
4919 bp->serdes_an_pending = 2;
4920 bp->current_interval = bp->timer_interval;
4924 bp->current_interval = bp->timer_interval;
4926 spin_unlock(&bp->phy_lock);
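/* Periodic timer: send the driver pulse to the firmware, read the firmware
 * rx-drop counter, work around the 5708 statistics erratum and run the
 * SerDes polling state machines.
 */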
4930 bnx2_timer(unsigned long data)
4932 struct bnx2 *bp = (struct bnx2 *) data;
4935 if (!netif_running(bp->dev))
4938 if (atomic_read(&bp->intr_sem) != 0)
4939 goto bnx2_restart_timer;
4941 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4942 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4944 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4946 /* work around occasionally corrupted counters */
4947 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4948 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4949 BNX2_HC_COMMAND_STATS_NOW);
4951 if (bp->phy_flags & PHY_SERDES_FLAG) {
4952 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4953 bnx2_5706_serdes_timer(bp);
4955 bnx2_5708_serdes_timer(bp);
4959 mod_timer(&bp->timer, jiffies + bp->current_interval);
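/* IRQ setup: use the one-shot MSI handler when the chip supports it, the
 * plain MSI handler otherwise, and fall back to a shared INTx handler when
 * MSI is not enabled.
 */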
4963 bnx2_request_irq(struct bnx2 *bp)
4965 struct net_device *dev = bp->dev;
4968 if (bp->flags & USING_MSI_FLAG) {
4969 irq_handler_t fn = bnx2_msi;
4971 if (bp->flags & ONE_SHOT_MSI_FLAG)
4972 fn = bnx2_msi_1shot;
4974 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4976 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4977 IRQF_SHARED, dev->name, dev);
4982 bnx2_free_irq(struct bnx2 *bp)
4984 struct net_device *dev = bp->dev;
4986 if (bp->flags & USING_MSI_FLAG) {
4987 free_irq(bp->pdev->irq, dev);
4988 pci_disable_msi(bp->pdev);
4989 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4991 free_irq(bp->pdev->irq, dev);
4994 /* Called with rtnl_lock */
4996 bnx2_open(struct net_device *dev)
4998 struct bnx2 *bp = netdev_priv(dev);
5001 netif_carrier_off(dev);
5003 bnx2_set_power_state(bp, PCI_D0);
5004 bnx2_disable_int(bp);
5006 rc = bnx2_alloc_mem(bp);
5010 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5011 if (pci_enable_msi(bp->pdev) == 0) {
5012 bp->flags |= USING_MSI_FLAG;
5013 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5014 bp->flags |= ONE_SHOT_MSI_FLAG;
5017 rc = bnx2_request_irq(bp);
5024 rc = bnx2_init_nic(bp);
5033 mod_timer(&bp->timer, jiffies + bp->current_interval);
5035 atomic_set(&bp->intr_sem, 0);
5037 bnx2_enable_int(bp);
5039 if (bp->flags & USING_MSI_FLAG) {
5040 /* Test MSI to make sure it is working
5041 * If MSI test fails, go back to INTx mode
5043 if (bnx2_test_intr(bp) != 0) {
5044 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5045 " using MSI, switching to INTx mode. Please"
5046 " report this failure to the PCI maintainer"
5047 " and include system chipset information.\n",
5050 bnx2_disable_int(bp);
5053 rc = bnx2_init_nic(bp);
5056 rc = bnx2_request_irq(bp);
5061 del_timer_sync(&bp->timer);
5064 bnx2_enable_int(bp);
5067 if (bp->flags & USING_MSI_FLAG) {
5068 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5071 netif_start_queue(dev);
5077 bnx2_reset_task(struct work_struct *work)
5079 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5081 if (!netif_running(bp->dev))
5084 bp->in_reset_task = 1;
5085 bnx2_netif_stop(bp);
5089 atomic_set(&bp->intr_sem, 1);
5090 bnx2_netif_start(bp);
5091 bp->in_reset_task = 0;
5095 bnx2_tx_timeout(struct net_device *dev)
5097 struct bnx2 *bp = netdev_priv(dev);
5099 /* This allows the netif to be shut down gracefully before resetting */
5100 schedule_work(&bp->reset_task);
5104 /* Called with rtnl_lock */
5106 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5108 struct bnx2 *bp = netdev_priv(dev);
5110 bnx2_netif_stop(bp);
5113 bnx2_set_rx_mode(dev);
5115 bnx2_netif_start(bp);
5119 /* Called with netif_tx_lock.
5120 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5121 * netif_wake_queue().
5124 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5126 struct bnx2 *bp = netdev_priv(dev);
5129 struct sw_bd *tx_buf;
5130 u32 len, vlan_tag_flags, last_frag, mss;
5131 u16 prod, ring_prod;
5134 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5135 netif_stop_queue(dev);
5136 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5139 return NETDEV_TX_BUSY;
5141 len = skb_headlen(skb);
5143 ring_prod = TX_RING_IDX(prod);
5146 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5147 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5150 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5151 vlan_tag_flags |=
5152 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5154 if ((mss = skb_shinfo(skb)->gso_size)) {
5155 u32 tcp_opt_len, ip_tcp_len;
5158 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5160 tcp_opt_len = tcp_optlen(skb);
5162 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5163 u32 tcp_off = skb_transport_offset(skb) -
5164 sizeof(struct ipv6hdr) - ETH_HLEN;
5166 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5167 TX_BD_FLAGS_SW_FLAGS;
5168 if (likely(tcp_off == 0))
5169 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5172 vlan_tag_flags |= ((tcp_off & 0x3) <<
5173 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5174 ((tcp_off & 0x10) <<
5175 TX_BD_FLAGS_TCP6_OFF4_SHL);
5176 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
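/* The IPv6 TCP header offset is spread across the descriptor: bits 0-1
 * go into the OFF0 field of the flags word, bits 2-3 into the OFF2 area
 * of the mss field, and bit 4 into the OFF4 flag bit, as encoded above.
 */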
5179 if (skb_header_cloned(skb) &&
5180 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5182 return NETDEV_TX_OK;
5185 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5189 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5190 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5194 if (tcp_opt_len || (iph->ihl > 5)) {
5195 vlan_tag_flags |= ((iph->ihl - 5) +
5196 (tcp_opt_len >> 2)) << 8;
5202 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5204 tx_buf = &bp->tx_buf_ring[ring_prod];
5206 pci_unmap_addr_set(tx_buf, mapping, mapping);
5208 txbd = &bp->tx_desc_ring[ring_prod];
5210 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5211 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5212 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5213 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5215 last_frag = skb_shinfo(skb)->nr_frags;
5217 for (i = 0; i < last_frag; i++) {
5218 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5220 prod = NEXT_TX_BD(prod);
5221 ring_prod = TX_RING_IDX(prod);
5222 txbd = &bp->tx_desc_ring[ring_prod];
5225 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5226 len, PCI_DMA_TODEVICE);
5227 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5230 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5231 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5232 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5233 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5236 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5238 prod = NEXT_TX_BD(prod);
5239 bp->tx_prod_bseq += skb->len;
5241 REG_WR16(bp, bp->tx_bidx_addr, prod);
5242 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5247 dev->trans_start = jiffies;
5249 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5250 netif_stop_queue(dev);
5251 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5252 netif_wake_queue(dev);
5255 return NETDEV_TX_OK;
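/* Illustrative sketch (not part of the driver): how bnx2_start_xmit()
 * above splits a 64-bit DMA mapping into the hi/lo descriptor words and
 * packs the frame length and MSS into one 32-bit field.  The struct and
 * function names below are made up for the example.
 */
struct example_tx_bd {
	u32 haddr_hi;
	u32 haddr_lo;
	u32 mss_nbytes;
};

static inline void example_fill_tx_bd(struct example_tx_bd *bd,
				       u64 mapping, u32 len, u32 mss)
{
	bd->haddr_hi   = (u32) (mapping >> 32);         /* upper 32 address bits */
	bd->haddr_lo   = (u32) (mapping & 0xffffffff);  /* lower 32 address bits */
	bd->mss_nbytes = len | (mss << 16);             /* length low, MSS high  */
}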
5258 /* Called with rtnl_lock */
5260 bnx2_close(struct net_device *dev)
5262 struct bnx2 *bp = netdev_priv(dev);
5265 /* Calling flush_scheduled_work() may deadlock because
5266 * linkwatch_event() may be on the workqueue and it will try to get
5267 * the rtnl_lock which we are holding.
5269 while (bp->in_reset_task)
5272 bnx2_netif_stop(bp);
5273 del_timer_sync(&bp->timer);
5274 if (bp->flags & NO_WOL_FLAG)
5275 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5276 else if (bp->wol)
5277 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5278 else
5279 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5280 bnx2_reset_chip(bp, reset_code);
5285 netif_carrier_off(bp->dev);
5286 bnx2_set_power_state(bp, PCI_D3hot);
5290 #define GET_NET_STATS64(ctr) \
5291 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5292 (unsigned long) (ctr##_lo)
5294 #define GET_NET_STATS32(ctr) \
5295 (ctr##_lo)
5297 #if (BITS_PER_LONG == 64)
5298 #define GET_NET_STATS GET_NET_STATS64
5300 #define GET_NET_STATS GET_NET_STATS32
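/* Illustrative sketch (not part of the driver): what the GET_NET_STATS
 * macros above reduce to for a counter kept as _hi/_lo 32-bit words.
 * "example_hi"/"example_lo" are made-up names; the driver applies the
 * macro to the stat_* members of struct statistics_block.
 */
static inline unsigned long example_get_net_stats(u32 example_hi,
						  u32 example_lo)
{
#if (BITS_PER_LONG == 64)
	/* 64-bit host: hi = 0x1, lo = 0x10 yields 0x100000010 */
	return ((unsigned long) example_hi << 32) + example_lo;
#else
	/* 32-bit host: only the low word fits in an unsigned long */
	return example_lo;
#endif
}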
5303 static struct net_device_stats *
5304 bnx2_get_stats(struct net_device *dev)
5306 struct bnx2 *bp = netdev_priv(dev);
5307 struct statistics_block *stats_blk = bp->stats_blk;
5308 struct net_device_stats *net_stats = &bp->net_stats;
5310 if (bp->stats_blk == NULL) {
5313 net_stats->rx_packets =
5314 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5315 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5316 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5318 net_stats->tx_packets =
5319 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5320 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5321 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5323 net_stats->rx_bytes =
5324 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5326 net_stats->tx_bytes =
5327 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5329 net_stats->multicast =
5330 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5332 net_stats->collisions =
5333 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5335 net_stats->rx_length_errors =
5336 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5337 stats_blk->stat_EtherStatsOverrsizePkts);
5339 net_stats->rx_over_errors =
5340 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5342 net_stats->rx_frame_errors =
5343 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5345 net_stats->rx_crc_errors =
5346 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5348 net_stats->rx_errors = net_stats->rx_length_errors +
5349 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5350 net_stats->rx_crc_errors;
5352 net_stats->tx_aborted_errors =
5353 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5354 stats_blk->stat_Dot3StatsLateCollisions);
5356 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5357 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5358 net_stats->tx_carrier_errors = 0;
5359 else
5360 net_stats->tx_carrier_errors =
5362 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5365 net_stats->tx_errors =
5367 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5368 +
5369 net_stats->tx_aborted_errors +
5370 net_stats->tx_carrier_errors;
5372 net_stats->rx_missed_errors =
5373 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5374 stats_blk->stat_FwRxDrop);
5379 /* All ethtool functions called with rtnl_lock */
5382 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5384 struct bnx2 *bp = netdev_priv(dev);
5385 int support_serdes = 0, support_copper = 0;
5387 cmd->supported = SUPPORTED_Autoneg;
5388 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5391 } else if (bp->phy_port == PORT_FIBRE)
5396 if (support_serdes) {
5397 cmd->supported |= SUPPORTED_1000baseT_Full |
5399 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5400 cmd->supported |= SUPPORTED_2500baseX_Full;
5403 if (support_copper) {
5404 cmd->supported |= SUPPORTED_10baseT_Half |
5405 SUPPORTED_10baseT_Full |
5406 SUPPORTED_100baseT_Half |
5407 SUPPORTED_100baseT_Full |
5408 SUPPORTED_1000baseT_Full |
5413 spin_lock_bh(&bp->phy_lock);
5414 cmd->port = bp->phy_port;
5415 cmd->advertising = bp->advertising;
5417 if (bp->autoneg & AUTONEG_SPEED) {
5418 cmd->autoneg = AUTONEG_ENABLE;
5421 cmd->autoneg = AUTONEG_DISABLE;
5424 if (netif_carrier_ok(dev)) {
5425 cmd->speed = bp->line_speed;
5426 cmd->duplex = bp->duplex;
5432 spin_unlock_bh(&bp->phy_lock);
5434 cmd->transceiver = XCVR_INTERNAL;
5435 cmd->phy_address = bp->phy_addr;
5441 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5443 struct bnx2 *bp = netdev_priv(dev);
5444 u8 autoneg = bp->autoneg;
5445 u8 req_duplex = bp->req_duplex;
5446 u16 req_line_speed = bp->req_line_speed;
5447 u32 advertising = bp->advertising;
5450 spin_lock_bh(&bp->phy_lock);
5452 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5453 goto err_out_unlock;
5455 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5456 goto err_out_unlock;
5458 if (cmd->autoneg == AUTONEG_ENABLE) {
5459 autoneg |= AUTONEG_SPEED;
5461 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5463 /* allow advertising 1 speed */
5464 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5465 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5466 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5467 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5469 if (cmd->port == PORT_FIBRE)
5470 goto err_out_unlock;
5472 advertising = cmd->advertising;
5474 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5475 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5476 (cmd->port == PORT_TP))
5477 goto err_out_unlock;
5478 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5479 advertising = cmd->advertising;
5480 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5481 goto err_out_unlock;
5483 if (cmd->port == PORT_FIBRE)
5484 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5486 advertising = ETHTOOL_ALL_COPPER_SPEED;
5488 advertising |= ADVERTISED_Autoneg;
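/* Everything past this point handles a forced speed/duplex setting:
 * fibre ports only accept 1000 or 2500 Mbps at full duplex (2500 only
 * on 2.5G-capable PHYs), and those speeds are rejected on copper when
 * autoneg is off.
 */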
5491 if (cmd->port == PORT_FIBRE) {
5492 if ((cmd->speed != SPEED_1000 &&
5493 cmd->speed != SPEED_2500) ||
5494 (cmd->duplex != DUPLEX_FULL))
5495 goto err_out_unlock;
5497 if (cmd->speed == SPEED_2500 &&
5498 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5499 goto err_out_unlock;
5501 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5502 goto err_out_unlock;
5504 autoneg &= ~AUTONEG_SPEED;
5505 req_line_speed = cmd->speed;
5506 req_duplex = cmd->duplex;
5510 bp->autoneg = autoneg;
5511 bp->advertising = advertising;
5512 bp->req_line_speed = req_line_speed;
5513 bp->req_duplex = req_duplex;
5515 err = bnx2_setup_phy(bp, cmd->port);
5518 spin_unlock_bh(&bp->phy_lock);
5524 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5526 struct bnx2 *bp = netdev_priv(dev);
5528 strcpy(info->driver, DRV_MODULE_NAME);
5529 strcpy(info->version, DRV_MODULE_VERSION);
5530 strcpy(info->bus_info, pci_name(bp->pdev));
5531 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5532 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5533 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5534 info->fw_version[1] = info->fw_version[3] = '.';
5535 info->fw_version[5] = 0;
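/* Example (illustrative value): bp->fw_ver = 0x01020300 yields the
 * fw_version string "1.2.3": one decimal digit per byte, taken from
 * bits 31-24, 23-16 and 15-8 and separated by dots.
 */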
5538 #define BNX2_REGDUMP_LEN (32 * 1024)
5541 bnx2_get_regs_len(struct net_device *dev)
5543 return BNX2_REGDUMP_LEN;
5547 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5549 u32 *p = _p, i, offset;
5551 struct bnx2 *bp = netdev_priv(dev);
5552 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5553 0x0800, 0x0880, 0x0c00, 0x0c10,
5554 0x0c30, 0x0d08, 0x1000, 0x101c,
5555 0x1040, 0x1048, 0x1080, 0x10a4,
5556 0x1400, 0x1490, 0x1498, 0x14f0,
5557 0x1500, 0x155c, 0x1580, 0x15dc,
5558 0x1600, 0x1658, 0x1680, 0x16d8,
5559 0x1800, 0x1820, 0x1840, 0x1854,
5560 0x1880, 0x1894, 0x1900, 0x1984,
5561 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5562 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5563 0x2000, 0x2030, 0x23c0, 0x2400,
5564 0x2800, 0x2820, 0x2830, 0x2850,
5565 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5566 0x3c00, 0x3c94, 0x4000, 0x4010,
5567 0x4080, 0x4090, 0x43c0, 0x4458,
5568 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5569 0x4fc0, 0x5010, 0x53c0, 0x5444,
5570 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5571 0x5fc0, 0x6000, 0x6400, 0x6428,
5572 0x6800, 0x6848, 0x684c, 0x6860,
5573 0x6888, 0x6910, 0x8000 };
5577 memset(p, 0, BNX2_REGDUMP_LEN);
5579 if (!netif_running(bp->dev))
5583 offset = reg_boundaries[0];
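/* reg_boundaries[] holds [start, end) offset pairs; the loop below reads
 * one 32-bit register at a time within each pair and, on hitting a
 * pair's end, jumps the offset and the output pointer to the next
 * pair's start, leaving the gaps zero-filled by the memset() above.
 */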
5585 while (offset < BNX2_REGDUMP_LEN) {
5586 *p++ = REG_RD(bp, offset);
5588 if (offset == reg_boundaries[i + 1]) {
5589 offset = reg_boundaries[i + 2];
5590 p = (u32 *) (orig_p + offset);
5597 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5599 struct bnx2 *bp = netdev_priv(dev);
5601 if (bp->flags & NO_WOL_FLAG) {
5606 wol->supported = WAKE_MAGIC;
5608 wol->wolopts = WAKE_MAGIC;
5612 memset(&wol->sopass, 0, sizeof(wol->sopass));
5616 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5618 struct bnx2 *bp = netdev_priv(dev);
5620 if (wol->wolopts & ~WAKE_MAGIC)
5623 if (wol->wolopts & WAKE_MAGIC) {
5624 if (bp->flags & NO_WOL_FLAG)
5636 bnx2_nway_reset(struct net_device *dev)
5638 struct bnx2 *bp = netdev_priv(dev);
5641 if (!(bp->autoneg & AUTONEG_SPEED)) {
5645 spin_lock_bh(&bp->phy_lock);
5647 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5650 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5651 spin_unlock_bh(&bp->phy_lock);
5655 /* Force a link down visible on the other side */
5656 if (bp->phy_flags & PHY_SERDES_FLAG) {
5657 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5658 spin_unlock_bh(&bp->phy_lock);
5662 spin_lock_bh(&bp->phy_lock);
5664 bp->current_interval = SERDES_AN_TIMEOUT;
5665 bp->serdes_an_pending = 1;
5666 mod_timer(&bp->timer, jiffies + bp->current_interval);
5669 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5670 bmcr &= ~BMCR_LOOPBACK;
5671 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5673 spin_unlock_bh(&bp->phy_lock);
5679 bnx2_get_eeprom_len(struct net_device *dev)
5681 struct bnx2 *bp = netdev_priv(dev);
5683 if (bp->flash_info == NULL)
5686 return (int) bp->flash_size;
5690 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5693 struct bnx2 *bp = netdev_priv(dev);
5696 /* parameters already validated in ethtool_get_eeprom */
5698 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5704 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5707 struct bnx2 *bp = netdev_priv(dev);
5710 /* parameters already validated in ethtool_set_eeprom */
5712 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5718 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5720 struct bnx2 *bp = netdev_priv(dev);
5722 memset(coal, 0, sizeof(struct ethtool_coalesce));
5724 coal->rx_coalesce_usecs = bp->rx_ticks;
5725 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5726 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5727 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5729 coal->tx_coalesce_usecs = bp->tx_ticks;
5730 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5731 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5732 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5734 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5740 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5742 struct bnx2 *bp = netdev_priv(dev);
5744 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5745 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5747 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5748 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5750 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5751 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5753 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5754 if (bp->rx_quick_cons_trip_int > 0xff)
5755 bp->rx_quick_cons_trip_int = 0xff;
5757 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5758 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5760 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5761 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5763 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5764 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5766 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5767 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5768 0xff;
5770 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5771 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5772 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5773 bp->stats_ticks = USEC_PER_SEC;
5775 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5776 bp->stats_ticks &= 0xffff00;
5778 if (netif_running(bp->dev)) {
5779 bnx2_netif_stop(bp);
5781 bnx2_netif_start(bp);
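/* Example (user-space, illustrative): the limits above correspond to an
 * ethtool coalescing request such as
 *   ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 * whose values land in rx_ticks/rx_quick_cons_trip and
 * tx_ticks/tx_quick_cons_trip after the clamping above.
 */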
5788 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5790 struct bnx2 *bp = netdev_priv(dev);
5792 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5793 ering->rx_mini_max_pending = 0;
5794 ering->rx_jumbo_max_pending = 0;
5796 ering->rx_pending = bp->rx_ring_size;
5797 ering->rx_mini_pending = 0;
5798 ering->rx_jumbo_pending = 0;
5800 ering->tx_max_pending = MAX_TX_DESC_CNT;
5801 ering->tx_pending = bp->tx_ring_size;
5805 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5807 struct bnx2 *bp = netdev_priv(dev);
5809 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5810 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5811 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5815 if (netif_running(bp->dev)) {
5816 bnx2_netif_stop(bp);
5817 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5822 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5823 bp->tx_ring_size = ering->tx_pending;
5825 if (netif_running(bp->dev)) {
5828 rc = bnx2_alloc_mem(bp);
5832 bnx2_netif_start(bp);
5839 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5841 struct bnx2 *bp = netdev_priv(dev);
5843 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5844 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5845 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5849 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5851 struct bnx2 *bp = netdev_priv(dev);
5853 bp->req_flow_ctrl = 0;
5854 if (epause->rx_pause)
5855 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5856 if (epause->tx_pause)
5857 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5859 if (epause->autoneg) {
5860 bp->autoneg |= AUTONEG_FLOW_CTRL;
5863 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5866 spin_lock_bh(&bp->phy_lock);
5868 bnx2_setup_phy(bp, bp->phy_port);
5870 spin_unlock_bh(&bp->phy_lock);
5876 bnx2_get_rx_csum(struct net_device *dev)
5878 struct bnx2 *bp = netdev_priv(dev);
5884 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5886 struct bnx2 *bp = netdev_priv(dev);
5893 bnx2_set_tso(struct net_device *dev, u32 data)
5895 struct bnx2 *bp = netdev_priv(dev);
5898 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5899 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5900 dev->features |= NETIF_F_TSO6;
5902 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5907 #define BNX2_NUM_STATS 46
5910 char string[ETH_GSTRING_LEN];
5911 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5913 { "rx_error_bytes" },
5915 { "tx_error_bytes" },
5916 { "rx_ucast_packets" },
5917 { "rx_mcast_packets" },
5918 { "rx_bcast_packets" },
5919 { "tx_ucast_packets" },
5920 { "tx_mcast_packets" },
5921 { "tx_bcast_packets" },
5922 { "tx_mac_errors" },
5923 { "tx_carrier_errors" },
5924 { "rx_crc_errors" },
5925 { "rx_align_errors" },
5926 { "tx_single_collisions" },
5927 { "tx_multi_collisions" },
5929 { "tx_excess_collisions" },
5930 { "tx_late_collisions" },
5931 { "tx_total_collisions" },
5934 { "rx_undersize_packets" },
5935 { "rx_oversize_packets" },
5936 { "rx_64_byte_packets" },
5937 { "rx_65_to_127_byte_packets" },
5938 { "rx_128_to_255_byte_packets" },
5939 { "rx_256_to_511_byte_packets" },
5940 { "rx_512_to_1023_byte_packets" },
5941 { "rx_1024_to_1522_byte_packets" },
5942 { "rx_1523_to_9022_byte_packets" },
5943 { "tx_64_byte_packets" },
5944 { "tx_65_to_127_byte_packets" },
5945 { "tx_128_to_255_byte_packets" },
5946 { "tx_256_to_511_byte_packets" },
5947 { "tx_512_to_1023_byte_packets" },
5948 { "tx_1024_to_1522_byte_packets" },
5949 { "tx_1523_to_9022_byte_packets" },
5950 { "rx_xon_frames" },
5951 { "rx_xoff_frames" },
5952 { "tx_xon_frames" },
5953 { "tx_xoff_frames" },
5954 { "rx_mac_ctrl_frames" },
5955 { "rx_filtered_packets" },
5957 { "rx_fw_discards" },
5960 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
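/* Example: STATS_OFFSET32(stat_IfHCInOctets_hi) is that counter's byte
 * offset divided by 4, i.e. its index when the statistics block is
 * viewed as an array of u32 words; bnx2_get_ethtool_stats() below uses
 * these indices to walk hw_stats.
 */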
5962 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5963 STATS_OFFSET32(stat_IfHCInOctets_hi),
5964 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5965 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5966 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5967 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5968 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5969 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5970 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5971 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5972 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5973 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5974 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5975 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5976 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5977 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5978 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5979 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5980 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5981 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5982 STATS_OFFSET32(stat_EtherStatsCollisions),
5983 STATS_OFFSET32(stat_EtherStatsFragments),
5984 STATS_OFFSET32(stat_EtherStatsJabbers),
5985 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5986 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5987 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5988 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5989 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5990 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5991 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5992 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5993 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5994 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5995 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5996 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5997 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5998 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5999 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6000 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6001 STATS_OFFSET32(stat_XonPauseFramesReceived),
6002 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6003 STATS_OFFSET32(stat_OutXonSent),
6004 STATS_OFFSET32(stat_OutXoffSent),
6005 STATS_OFFSET32(stat_MacControlFramesReceived),
6006 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6007 STATS_OFFSET32(stat_IfInMBUFDiscards),
6008 STATS_OFFSET32(stat_FwRxDrop),
6011 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6012 * skipped because of errata.
6014 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6015 8,0,8,8,8,8,8,8,8,8,
6016 4,0,4,4,4,4,4,4,4,4,
6017 4,4,4,4,4,4,4,4,4,4,
6018 4,4,4,4,4,4,4,4,4,4,
6022 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6023 8,0,8,8,8,8,8,8,8,8,
6024 4,4,4,4,4,4,4,4,4,4,
6025 4,4,4,4,4,4,4,4,4,4,
6026 4,4,4,4,4,4,4,4,4,4,
6030 #define BNX2_NUM_TESTS 6
6033 char string[ETH_GSTRING_LEN];
6034 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6035 { "register_test (offline)" },
6036 { "memory_test (offline)" },
6037 { "loopback_test (offline)" },
6038 { "nvram_test (online)" },
6039 { "interrupt_test (online)" },
6040 { "link_test (online)" },
6044 bnx2_self_test_count(struct net_device *dev)
6046 return BNX2_NUM_TESTS;
6050 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6052 struct bnx2 *bp = netdev_priv(dev);
6054 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6055 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6058 bnx2_netif_stop(bp);
6059 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6062 if (bnx2_test_registers(bp) != 0) {
6064 etest->flags |= ETH_TEST_FL_FAILED;
6066 if (bnx2_test_memory(bp) != 0) {
6068 etest->flags |= ETH_TEST_FL_FAILED;
6070 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6071 etest->flags |= ETH_TEST_FL_FAILED;
6073 if (!netif_running(bp->dev)) {
6074 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6078 bnx2_netif_start(bp);
6081 /* wait for link up */
6082 for (i = 0; i < 7; i++) {
6085 msleep_interruptible(1000);
6089 if (bnx2_test_nvram(bp) != 0) {
6091 etest->flags |= ETH_TEST_FL_FAILED;
6093 if (bnx2_test_intr(bp) != 0) {
6095 etest->flags |= ETH_TEST_FL_FAILED;
6098 if (bnx2_test_link(bp) != 0) {
6100 etest->flags |= ETH_TEST_FL_FAILED;
6106 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6108 switch (stringset) {
6110 memcpy(buf, bnx2_stats_str_arr,
6111 sizeof(bnx2_stats_str_arr));
6114 memcpy(buf, bnx2_tests_str_arr,
6115 sizeof(bnx2_tests_str_arr));
6121 bnx2_get_stats_count(struct net_device *dev)
6123 return BNX2_NUM_STATS;
6127 bnx2_get_ethtool_stats(struct net_device *dev,
6128 struct ethtool_stats *stats, u64 *buf)
6130 struct bnx2 *bp = netdev_priv(dev);
6132 u32 *hw_stats = (u32 *) bp->stats_blk;
6133 u8 *stats_len_arr = NULL;
6135 if (hw_stats == NULL) {
6136 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6140 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6141 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6142 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6143 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6144 stats_len_arr = bnx2_5706_stats_len_arr;
6146 stats_len_arr = bnx2_5708_stats_len_arr;
6148 for (i = 0; i < BNX2_NUM_STATS; i++) {
6149 if (stats_len_arr[i] == 0) {
6150 /* skip this counter */
6154 if (stats_len_arr[i] == 4) {
6155 /* 4-byte counter */
6157 *(hw_stats + bnx2_stats_offset_arr[i]);
6160 /* 8-byte counter */
6161 buf[i] = (((u64) *(hw_stats +
6162 bnx2_stats_offset_arr[i])) << 32) +
6163 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6168 bnx2_phys_id(struct net_device *dev, u32 data)
6170 struct bnx2 *bp = netdev_priv(dev);
6177 save = REG_RD(bp, BNX2_MISC_CFG);
6178 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6180 for (i = 0; i < (data * 2); i++) {
6182 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6185 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6186 BNX2_EMAC_LED_1000MB_OVERRIDE |
6187 BNX2_EMAC_LED_100MB_OVERRIDE |
6188 BNX2_EMAC_LED_10MB_OVERRIDE |
6189 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6190 BNX2_EMAC_LED_TRAFFIC);
6192 msleep_interruptible(500);
6193 if (signal_pending(current))
6196 REG_WR(bp, BNX2_EMAC_LED, 0);
6197 REG_WR(bp, BNX2_MISC_CFG, save);
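/* The loop above toggles the LED override pattern every 500 ms, so a
 * "data" argument of N blinks the port LED for roughly N seconds before
 * the saved BNX2_MISC_CFG LED mode is restored.
 */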
6202 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6204 struct bnx2 *bp = netdev_priv(dev);
6206 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6207 return (ethtool_op_set_tx_hw_csum(dev, data));
6208 else
6209 return (ethtool_op_set_tx_csum(dev, data));
6212 static const struct ethtool_ops bnx2_ethtool_ops = {
6213 .get_settings = bnx2_get_settings,
6214 .set_settings = bnx2_set_settings,
6215 .get_drvinfo = bnx2_get_drvinfo,
6216 .get_regs_len = bnx2_get_regs_len,
6217 .get_regs = bnx2_get_regs,
6218 .get_wol = bnx2_get_wol,
6219 .set_wol = bnx2_set_wol,
6220 .nway_reset = bnx2_nway_reset,
6221 .get_link = ethtool_op_get_link,
6222 .get_eeprom_len = bnx2_get_eeprom_len,
6223 .get_eeprom = bnx2_get_eeprom,
6224 .set_eeprom = bnx2_set_eeprom,
6225 .get_coalesce = bnx2_get_coalesce,
6226 .set_coalesce = bnx2_set_coalesce,
6227 .get_ringparam = bnx2_get_ringparam,
6228 .set_ringparam = bnx2_set_ringparam,
6229 .get_pauseparam = bnx2_get_pauseparam,
6230 .set_pauseparam = bnx2_set_pauseparam,
6231 .get_rx_csum = bnx2_get_rx_csum,
6232 .set_rx_csum = bnx2_set_rx_csum,
6233 .get_tx_csum = ethtool_op_get_tx_csum,
6234 .set_tx_csum = bnx2_set_tx_csum,
6235 .get_sg = ethtool_op_get_sg,
6236 .set_sg = ethtool_op_set_sg,
6237 .get_tso = ethtool_op_get_tso,
6238 .set_tso = bnx2_set_tso,
6239 .self_test_count = bnx2_self_test_count,
6240 .self_test = bnx2_self_test,
6241 .get_strings = bnx2_get_strings,
6242 .phys_id = bnx2_phys_id,
6243 .get_stats_count = bnx2_get_stats_count,
6244 .get_ethtool_stats = bnx2_get_ethtool_stats,
6245 .get_perm_addr = ethtool_op_get_perm_addr,
6248 /* Called with rtnl_lock */
6250 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6252 struct mii_ioctl_data *data = if_mii(ifr);
6253 struct bnx2 *bp = netdev_priv(dev);
6258 data->phy_id = bp->phy_addr;
6264 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6267 if (!netif_running(dev))
6270 spin_lock_bh(&bp->phy_lock);
6271 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6272 spin_unlock_bh(&bp->phy_lock);
6274 data->val_out = mii_regval;
6280 if (!capable(CAP_NET_ADMIN))
6283 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6286 if (!netif_running(dev))
6289 spin_lock_bh(&bp->phy_lock);
6290 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6291 spin_unlock_bh(&bp->phy_lock);
6302 /* Called with rtnl_lock */
6304 bnx2_change_mac_addr(struct net_device *dev, void *p)
6306 struct sockaddr *addr = p;
6307 struct bnx2 *bp = netdev_priv(dev);
6309 if (!is_valid_ether_addr(addr->sa_data))
6312 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6313 if (netif_running(dev))
6314 bnx2_set_mac_addr(bp);
6319 /* Called with rtnl_lock */
6321 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6323 struct bnx2 *bp = netdev_priv(dev);
6325 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6326 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6330 if (netif_running(dev)) {
6331 bnx2_netif_stop(bp);
6335 bnx2_netif_start(bp);
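/* With the usual bnx2.h limits (60-byte minimum frame, 9022-byte jumbo
 * maximum, both including the 14-byte Ethernet header) the check above
 * accepts MTUs from 46 through 9008 bytes.  Assumes the standard
 * MIN_ETHERNET_PACKET_SIZE/MAX_ETHERNET_JUMBO_PACKET_SIZE values.
 */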
6340 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6342 poll_bnx2(struct net_device *dev)
6344 struct bnx2 *bp = netdev_priv(dev);
6346 disable_irq(bp->pdev->irq);
6347 bnx2_interrupt(bp->pdev->irq, dev);
6348 enable_irq(bp->pdev->irq);
6352 static void __devinit
6353 bnx2_get_5709_media(struct bnx2 *bp)
6355 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6356 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6359 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6361 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6362 bp->phy_flags |= PHY_SERDES_FLAG;
6366 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6367 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6369 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6371 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6376 bp->phy_flags |= PHY_SERDES_FLAG;
6384 bp->phy_flags |= PHY_SERDES_FLAG;
6390 static void __devinit
6391 bnx2_get_pci_speed(struct bnx2 *bp)
6395 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6396 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6399 bp->flags |= PCIX_FLAG;
6401 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6403 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6405 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6406 bp->bus_speed_mhz = 133;
6409 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6410 bp->bus_speed_mhz = 100;
6413 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6414 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6415 bp->bus_speed_mhz = 66;
6418 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6419 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6420 bp->bus_speed_mhz = 50;
6423 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6424 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6425 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6426 bp->bus_speed_mhz = 33;
6431 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6432 bp->bus_speed_mhz = 66;
6434 bp->bus_speed_mhz = 33;
6437 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6438 bp->flags |= PCI_32BIT_FLAG;
6442 static int __devinit
6443 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6446 unsigned long mem_len;
6449 u64 dma_mask, persist_dma_mask;
6451 SET_MODULE_OWNER(dev);
6452 SET_NETDEV_DEV(dev, &pdev->dev);
6453 bp = netdev_priv(dev);
6458 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6459 rc = pci_enable_device(pdev);
6461 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6465 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6467 "Cannot find PCI device base address, aborting.\n");
6469 goto err_out_disable;
6472 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6474 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6475 goto err_out_disable;
6478 pci_set_master(pdev);
6480 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6481 if (bp->pm_cap == 0) {
6483 "Cannot find power management capability, aborting.\n");
6485 goto err_out_release;
6491 spin_lock_init(&bp->phy_lock);
6492 spin_lock_init(&bp->indirect_lock);
6493 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6495 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6496 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6497 dev->mem_end = dev->mem_start + mem_len;
6498 dev->irq = pdev->irq;
6500 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6503 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6505 goto err_out_release;
6508 /* Configure byte swap and enable write to the reg_window registers.
6509 * Rely on CPU to do target byte swapping on big endian systems
6510 * The chip's target access swapping will not swap all accesses
6512 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6513 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6514 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6516 bnx2_set_power_state(bp, PCI_D0);
6518 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6520 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6521 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6523 "Cannot find PCIE capability, aborting.\n");
6527 bp->flags |= PCIE_FLAG;
6529 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6530 if (bp->pcix_cap == 0) {
6532 "Cannot find PCIX capability, aborting.\n");
6538 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6539 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6540 bp->flags |= MSI_CAP_FLAG;
6543 /* 5708 cannot support DMA addresses > 40-bit. */
6544 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6545 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6547 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6549 /* Configure DMA attributes. */
6550 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6551 dev->features |= NETIF_F_HIGHDMA;
6552 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6555 "pci_set_consistent_dma_mask failed, aborting.\n");
6558 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6559 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6563 if (!(bp->flags & PCIE_FLAG))
6564 bnx2_get_pci_speed(bp);
6566 /* 5706A0 may falsely detect SERR and PERR. */
6567 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6568 reg = REG_RD(bp, PCI_COMMAND);
6569 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6570 REG_WR(bp, PCI_COMMAND, reg);
6572 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6573 !(bp->flags & PCIX_FLAG)) {
6576 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6580 bnx2_init_nvram(bp);
6582 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6584 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6585 BNX2_SHM_HDR_SIGNATURE_SIG) {
6586 u32 off = PCI_FUNC(pdev->devfn) << 2;
6588 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6590 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6592 /* Get the permanent MAC address. First we need to make sure the
6593 * firmware is actually running.
6595 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6597 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6598 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6599 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6604 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6606 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6607 bp->mac_addr[0] = (u8) (reg >> 8);
6608 bp->mac_addr[1] = (u8) reg;
6610 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6611 bp->mac_addr[2] = (u8) (reg >> 24);
6612 bp->mac_addr[3] = (u8) (reg >> 16);
6613 bp->mac_addr[4] = (u8) (reg >> 8);
6614 bp->mac_addr[5] = (u8) reg;
6616 bp->tx_ring_size = MAX_TX_DESC_CNT;
6617 bnx2_set_rx_ring_size(bp, 255);
6621 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6623 bp->tx_quick_cons_trip_int = 20;
6624 bp->tx_quick_cons_trip = 20;
6625 bp->tx_ticks_int = 80;
6628 bp->rx_quick_cons_trip_int = 6;
6629 bp->rx_quick_cons_trip = 6;
6630 bp->rx_ticks_int = 18;
6633 bp->stats_ticks = 1000000 & 0xffff00;
6635 bp->timer_interval = HZ;
6636 bp->current_interval = HZ;
6640 /* Disable WOL support if we are running on a SERDES chip. */
6641 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6642 bnx2_get_5709_media(bp);
6643 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6644 bp->phy_flags |= PHY_SERDES_FLAG;
6646 bp->phy_port = PORT_TP;
6647 if (bp->phy_flags & PHY_SERDES_FLAG) {
6648 bp->phy_port = PORT_FIBRE;
6649 bp->flags |= NO_WOL_FLAG;
6650 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6652 reg = REG_RD_IND(bp, bp->shmem_base +
6653 BNX2_SHARED_HW_CFG_CONFIG);
6654 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6655 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6657 bnx2_init_remote_phy(bp);
6659 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6660 CHIP_NUM(bp) == CHIP_NUM_5708)
6661 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6662 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6663 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6665 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6666 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6667 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6668 bp->flags |= NO_WOL_FLAG;
6670 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6671 bp->tx_quick_cons_trip_int =
6672 bp->tx_quick_cons_trip;
6673 bp->tx_ticks_int = bp->tx_ticks;
6674 bp->rx_quick_cons_trip_int =
6675 bp->rx_quick_cons_trip;
6676 bp->rx_ticks_int = bp->rx_ticks;
6677 bp->comp_prod_trip_int = bp->comp_prod_trip;
6678 bp->com_ticks_int = bp->com_ticks;
6679 bp->cmd_ticks_int = bp->cmd_ticks;
6682 /* Disable MSI on the 5706 if an AMD 8132 bridge is found.
6684 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
6685 * with byte enables disabled on the unused 32-bit word. This is legal
6686 * but causes problems on the AMD 8132, which will eventually stop
6687 * responding after a while.
6689 * AMD believes this incompatibility is unique to the 5706, and
6690 * prefers to locally disable MSI rather than globally disabling it.
6692 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6693 struct pci_dev *amd_8132 = NULL;
6695 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6696 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6700 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6701 if (rev >= 0x10 && rev <= 0x13) {
6703 pci_dev_put(amd_8132);
6709 bnx2_set_default_link(bp);
6710 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6712 init_timer(&bp->timer);
6713 bp->timer.expires = RUN_AT(bp->timer_interval);
6714 bp->timer.data = (unsigned long) bp;
6715 bp->timer.function = bnx2_timer;
6721 iounmap(bp->regview);
6726 pci_release_regions(pdev);
6729 pci_disable_device(pdev);
6730 pci_set_drvdata(pdev, NULL);
6736 static char * __devinit
6737 bnx2_bus_string(struct bnx2 *bp, char *str)
6741 if (bp->flags & PCIE_FLAG) {
6742 s += sprintf(s, "PCI Express");
6744 s += sprintf(s, "PCI");
6745 if (bp->flags & PCIX_FLAG)
6746 s += sprintf(s, "-X");
6747 if (bp->flags & PCI_32BIT_FLAG)
6748 s += sprintf(s, " 32-bit");
6749 else
6750 s += sprintf(s, " 64-bit");
6751 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
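/* Example output: a 64-bit 133 MHz PCI-X slot is reported as
 * "PCI-X 64-bit 133MHz", while a PCI Express device reports just
 * "PCI Express".
 */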
6756 static int __devinit
6757 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6759 static int version_printed = 0;
6760 struct net_device *dev = NULL;
6765 if (version_printed++ == 0)
6766 printk(KERN_INFO "%s", version);
6768 /* dev zeroed in init_etherdev */
6769 dev = alloc_etherdev(sizeof(*bp));
6774 rc = bnx2_init_board(pdev, dev);
6780 dev->open = bnx2_open;
6781 dev->hard_start_xmit = bnx2_start_xmit;
6782 dev->stop = bnx2_close;
6783 dev->get_stats = bnx2_get_stats;
6784 dev->set_multicast_list = bnx2_set_rx_mode;
6785 dev->do_ioctl = bnx2_ioctl;
6786 dev->set_mac_address = bnx2_change_mac_addr;
6787 dev->change_mtu = bnx2_change_mtu;
6788 dev->tx_timeout = bnx2_tx_timeout;
6789 dev->watchdog_timeo = TX_TIMEOUT;
6791 dev->vlan_rx_register = bnx2_vlan_rx_register;
6793 dev->poll = bnx2_poll;
6794 dev->ethtool_ops = &bnx2_ethtool_ops;
6797 bp = netdev_priv(dev);
6799 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6800 dev->poll_controller = poll_bnx2;
6803 pci_set_drvdata(pdev, dev);
6805 memcpy(dev->dev_addr, bp->mac_addr, 6);
6806 memcpy(dev->perm_addr, bp->mac_addr, 6);
6807 bp->name = board_info[ent->driver_data].name;
6809 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6810 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6811 dev->features |= NETIF_F_IPV6_CSUM;
6814 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6816 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6817 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6818 dev->features |= NETIF_F_TSO6;
6820 if ((rc = register_netdev(dev))) {
6821 dev_err(&pdev->dev, "Cannot register net device\n");
6823 iounmap(bp->regview);
6824 pci_release_regions(pdev);
6825 pci_disable_device(pdev);
6826 pci_set_drvdata(pdev, NULL);
6831 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6835 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6836 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6837 bnx2_bus_string(bp, str),
6841 printk("node addr ");
6842 for (i = 0; i < 6; i++)
6843 printk("%2.2x", dev->dev_addr[i]);
6849 static void __devexit
6850 bnx2_remove_one(struct pci_dev *pdev)
6852 struct net_device *dev = pci_get_drvdata(pdev);
6853 struct bnx2 *bp = netdev_priv(dev);
6855 flush_scheduled_work();
6857 unregister_netdev(dev);
6860 iounmap(bp->regview);
6863 pci_release_regions(pdev);
6864 pci_disable_device(pdev);
6865 pci_set_drvdata(pdev, NULL);
6869 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6871 struct net_device *dev = pci_get_drvdata(pdev);
6872 struct bnx2 *bp = netdev_priv(dev);
6875 if (!netif_running(dev))
6878 flush_scheduled_work();
6879 bnx2_netif_stop(bp);
6880 netif_device_detach(dev);
6881 del_timer_sync(&bp->timer);
6882 if (bp->flags & NO_WOL_FLAG)
6883 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6884 else if (bp->wol)
6885 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6886 else
6887 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6888 bnx2_reset_chip(bp, reset_code);
6890 pci_save_state(pdev);
6891 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6896 bnx2_resume(struct pci_dev *pdev)
6898 struct net_device *dev = pci_get_drvdata(pdev);
6899 struct bnx2 *bp = netdev_priv(dev);
6901 if (!netif_running(dev))
6904 pci_restore_state(pdev);
6905 bnx2_set_power_state(bp, PCI_D0);
6906 netif_device_attach(dev);
6908 bnx2_netif_start(bp);
6912 static struct pci_driver bnx2_pci_driver = {
6913 .name = DRV_MODULE_NAME,
6914 .id_table = bnx2_pci_tbl,
6915 .probe = bnx2_init_one,
6916 .remove = __devexit_p(bnx2_remove_one),
6917 .suspend = bnx2_suspend,
6918 .resume = bnx2_resume,
6921 static int __init bnx2_init(void)
6923 return pci_register_driver(&bnx2_pci_driver);
6926 static void __exit bnx2_cleanup(void)
6928 pci_unregister_driver(&bnx2_pci_driver);
6931 module_init(bnx2_init);
6932 module_exit(bnx2_cleanup);