1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
58 #define DRV_MODULE_NAME "bnx2"
59 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "1.4.45"
61 #define DRV_MODULE_RELDATE "September 29, 2006"
63 #define RUN_AT(x) (jiffies + (x))
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT (5*HZ)
68 static const char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_msi = 0;
78 module_param(disable_msi, int, 0);
79 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91 /* indexed by board_t, above */
94 } board_info[] __devinitdata = {
95 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
96 { "HP NC370T Multifunction Gigabit Server Adapter" },
97 { "HP NC370i Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
99 { "HP NC370F Multifunction Gigabit Server Adapter" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
104 static struct pci_device_id bnx2_pci_tbl[] = {
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 static struct flash_spec flash_table[] =
125 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
126 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 /* Expansion entry 0001 */
130 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
131 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 /* Saifun SA25F010 (non-buffered flash) */
135 /* strap, cfg1, & write1 need updates */
136 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
137 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
139 "Non-buffered flash (128kB)"},
140 /* Saifun SA25F020 (non-buffered flash) */
141 /* strap, cfg1, & write1 need updates */
142 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
143 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
145 "Non-buffered flash (256kB)"},
146 /* Expansion entry 0100 */
147 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
152 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
153 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
154 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
155 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
156 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
157 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
160 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
161 /* Saifun SA25F005 (non-buffered flash) */
162 /* strap, cfg1, & write1 need updates */
163 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
164 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
166 "Non-buffered flash (64kB)"},
168 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
169 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 /* Expansion entry 1001 */
173 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
174 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 /* Expansion entry 1010 */
178 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* ATMEL AT45DB011B (buffered flash) */
183 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
184 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
185 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
186 "Buffered flash (128kB)"},
187 /* Expansion entry 1100 */
188 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
189 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
190 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
192 /* Expansion entry 1101 */
193 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 	/* Atmel Expansion entry 1110 */
198 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
199 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
200 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1110 (Atmel)"},
202 /* ATMEL AT45DB021B (buffered flash) */
203 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
206 "Buffered flash (256kB)"},
209 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
216 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
217 if (diff > MAX_TX_DESC_CNT)
218 diff = (diff & MAX_TX_DESC_CNT) - 1;
219 return (bp->tx_ring_size - diff);
223 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
225 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
226 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
230 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
233 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
237 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
240 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
243 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
244 REG_WR(bp, BNX2_CTX_CTX_CTRL,
245 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
246 for (i = 0; i < 5; i++) {
248 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
249 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
254 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
255 REG_WR(bp, BNX2_CTX_DATA, val);
260 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
265 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
266 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
267 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
269 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
270 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
275 val1 = (bp->phy_addr << 21) | (reg << 16) |
276 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
277 BNX2_EMAC_MDIO_COMM_START_BUSY;
278 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
280 for (i = 0; i < 50; i++) {
283 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
284 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
288 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
294 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
303 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
307 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
308 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
322 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
323 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
324 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
326 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
327 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
332 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
333 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
334 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
335 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
337 for (i = 0; i < 50; i++) {
340 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
341 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
347 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
352 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
353 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
354 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
356 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
357 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 bnx2_disable_int(struct bnx2 *bp)
368 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
369 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
370 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
374 bnx2_enable_int(struct bnx2 *bp)
376 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
377 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
378 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
380 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
381 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
383 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
387 bnx2_disable_int_sync(struct bnx2 *bp)
389 atomic_inc(&bp->intr_sem);
390 bnx2_disable_int(bp);
391 synchronize_irq(bp->pdev->irq);
395 bnx2_netif_stop(struct bnx2 *bp)
397 bnx2_disable_int_sync(bp);
398 if (netif_running(bp->dev)) {
399 netif_poll_disable(bp->dev);
400 netif_tx_disable(bp->dev);
401 bp->dev->trans_start = jiffies; /* prevent tx timeout */
406 bnx2_netif_start(struct bnx2 *bp)
408 if (atomic_dec_and_test(&bp->intr_sem)) {
409 if (netif_running(bp->dev)) {
410 netif_wake_queue(bp->dev);
411 netif_poll_enable(bp->dev);
418 bnx2_free_mem(struct bnx2 *bp)
422 for (i = 0; i < bp->ctx_pages; i++) {
423 if (bp->ctx_blk[i]) {
424 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
426 bp->ctx_blk_mapping[i]);
427 bp->ctx_blk[i] = NULL;
430 if (bp->status_blk) {
431 pci_free_consistent(bp->pdev, bp->status_stats_size,
432 bp->status_blk, bp->status_blk_mapping);
433 bp->status_blk = NULL;
434 bp->stats_blk = NULL;
436 if (bp->tx_desc_ring) {
437 pci_free_consistent(bp->pdev,
438 sizeof(struct tx_bd) * TX_DESC_CNT,
439 bp->tx_desc_ring, bp->tx_desc_mapping);
440 bp->tx_desc_ring = NULL;
442 kfree(bp->tx_buf_ring);
443 bp->tx_buf_ring = NULL;
444 for (i = 0; i < bp->rx_max_ring; i++) {
445 if (bp->rx_desc_ring[i])
446 pci_free_consistent(bp->pdev,
447 sizeof(struct rx_bd) * RX_DESC_CNT,
449 bp->rx_desc_mapping[i]);
450 bp->rx_desc_ring[i] = NULL;
452 vfree(bp->rx_buf_ring);
453 bp->rx_buf_ring = NULL;
457 bnx2_alloc_mem(struct bnx2 *bp)
459 int i, status_blk_size;
461 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
463 if (bp->tx_buf_ring == NULL)
466 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
467 sizeof(struct tx_bd) *
469 &bp->tx_desc_mapping);
470 if (bp->tx_desc_ring == NULL)
473 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
475 if (bp->rx_buf_ring == NULL)
478 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
481 for (i = 0; i < bp->rx_max_ring; i++) {
482 bp->rx_desc_ring[i] =
483 pci_alloc_consistent(bp->pdev,
484 sizeof(struct rx_bd) * RX_DESC_CNT,
485 &bp->rx_desc_mapping[i]);
486 if (bp->rx_desc_ring[i] == NULL)
491 /* Combine status and statistics blocks into one allocation. */
492 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
493 bp->status_stats_size = status_blk_size +
494 sizeof(struct statistics_block);
496 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
497 &bp->status_blk_mapping);
498 if (bp->status_blk == NULL)
501 memset(bp->status_blk, 0, bp->status_stats_size);
503 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
506 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
508 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
509 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
510 if (bp->ctx_pages == 0)
512 for (i = 0; i < bp->ctx_pages; i++) {
513 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
515 &bp->ctx_blk_mapping[i]);
516 if (bp->ctx_blk[i] == NULL)
528 bnx2_report_fw_link(struct bnx2 *bp)
530 u32 fw_link_status = 0;
535 switch (bp->line_speed) {
537 if (bp->duplex == DUPLEX_HALF)
538 fw_link_status = BNX2_LINK_STATUS_10HALF;
540 fw_link_status = BNX2_LINK_STATUS_10FULL;
543 if (bp->duplex == DUPLEX_HALF)
544 fw_link_status = BNX2_LINK_STATUS_100HALF;
546 fw_link_status = BNX2_LINK_STATUS_100FULL;
549 if (bp->duplex == DUPLEX_HALF)
550 fw_link_status = BNX2_LINK_STATUS_1000HALF;
552 fw_link_status = BNX2_LINK_STATUS_1000FULL;
555 if (bp->duplex == DUPLEX_HALF)
556 fw_link_status = BNX2_LINK_STATUS_2500HALF;
558 fw_link_status = BNX2_LINK_STATUS_2500FULL;
562 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
565 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
567 bnx2_read_phy(bp, MII_BMSR, &bmsr);
568 bnx2_read_phy(bp, MII_BMSR, &bmsr);
570 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
571 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
572 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
574 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
578 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
580 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
584 bnx2_report_link(struct bnx2 *bp)
587 netif_carrier_on(bp->dev);
588 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
590 printk("%d Mbps ", bp->line_speed);
592 if (bp->duplex == DUPLEX_FULL)
593 printk("full duplex");
595 printk("half duplex");
598 if (bp->flow_ctrl & FLOW_CTRL_RX) {
599 printk(", receive ");
600 if (bp->flow_ctrl & FLOW_CTRL_TX)
601 printk("& transmit ");
604 printk(", transmit ");
606 printk("flow control ON");
611 netif_carrier_off(bp->dev);
612 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
615 bnx2_report_fw_link(bp);
619 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
621 u32 local_adv, remote_adv;
624 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
625 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
627 if (bp->duplex == DUPLEX_FULL) {
628 bp->flow_ctrl = bp->req_flow_ctrl;
633 if (bp->duplex != DUPLEX_FULL) {
637 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
638 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
641 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
642 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
643 bp->flow_ctrl |= FLOW_CTRL_TX;
644 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
645 bp->flow_ctrl |= FLOW_CTRL_RX;
649 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
650 bnx2_read_phy(bp, MII_LPA, &remote_adv);
652 if (bp->phy_flags & PHY_SERDES_FLAG) {
653 u32 new_local_adv = 0;
654 u32 new_remote_adv = 0;
656 if (local_adv & ADVERTISE_1000XPAUSE)
657 new_local_adv |= ADVERTISE_PAUSE_CAP;
658 if (local_adv & ADVERTISE_1000XPSE_ASYM)
659 new_local_adv |= ADVERTISE_PAUSE_ASYM;
660 if (remote_adv & ADVERTISE_1000XPAUSE)
661 new_remote_adv |= ADVERTISE_PAUSE_CAP;
662 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
663 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
665 local_adv = new_local_adv;
666 remote_adv = new_remote_adv;
669 /* See Table 28B-3 of 802.3ab-1999 spec. */
670 if (local_adv & ADVERTISE_PAUSE_CAP) {
671 if(local_adv & ADVERTISE_PAUSE_ASYM) {
672 if (remote_adv & ADVERTISE_PAUSE_CAP) {
673 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
675 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
676 bp->flow_ctrl = FLOW_CTRL_RX;
680 if (remote_adv & ADVERTISE_PAUSE_CAP) {
681 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
685 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
686 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
687 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
689 bp->flow_ctrl = FLOW_CTRL_TX;
695 bnx2_5708s_linkup(struct bnx2 *bp)
700 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
701 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
702 case BCM5708S_1000X_STAT1_SPEED_10:
703 bp->line_speed = SPEED_10;
705 case BCM5708S_1000X_STAT1_SPEED_100:
706 bp->line_speed = SPEED_100;
708 case BCM5708S_1000X_STAT1_SPEED_1G:
709 bp->line_speed = SPEED_1000;
711 case BCM5708S_1000X_STAT1_SPEED_2G5:
712 bp->line_speed = SPEED_2500;
715 if (val & BCM5708S_1000X_STAT1_FD)
716 bp->duplex = DUPLEX_FULL;
718 bp->duplex = DUPLEX_HALF;
724 bnx2_5706s_linkup(struct bnx2 *bp)
726 u32 bmcr, local_adv, remote_adv, common;
729 bp->line_speed = SPEED_1000;
731 bnx2_read_phy(bp, MII_BMCR, &bmcr);
732 if (bmcr & BMCR_FULLDPLX) {
733 bp->duplex = DUPLEX_FULL;
736 bp->duplex = DUPLEX_HALF;
739 if (!(bmcr & BMCR_ANENABLE)) {
743 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
744 bnx2_read_phy(bp, MII_LPA, &remote_adv);
746 common = local_adv & remote_adv;
747 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
749 if (common & ADVERTISE_1000XFULL) {
750 bp->duplex = DUPLEX_FULL;
753 bp->duplex = DUPLEX_HALF;
761 bnx2_copper_linkup(struct bnx2 *bp)
765 bnx2_read_phy(bp, MII_BMCR, &bmcr);
766 if (bmcr & BMCR_ANENABLE) {
767 u32 local_adv, remote_adv, common;
769 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
770 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
772 common = local_adv & (remote_adv >> 2);
773 if (common & ADVERTISE_1000FULL) {
774 bp->line_speed = SPEED_1000;
775 bp->duplex = DUPLEX_FULL;
777 else if (common & ADVERTISE_1000HALF) {
778 bp->line_speed = SPEED_1000;
779 bp->duplex = DUPLEX_HALF;
782 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
783 bnx2_read_phy(bp, MII_LPA, &remote_adv);
785 common = local_adv & remote_adv;
786 if (common & ADVERTISE_100FULL) {
787 bp->line_speed = SPEED_100;
788 bp->duplex = DUPLEX_FULL;
790 else if (common & ADVERTISE_100HALF) {
791 bp->line_speed = SPEED_100;
792 bp->duplex = DUPLEX_HALF;
794 else if (common & ADVERTISE_10FULL) {
795 bp->line_speed = SPEED_10;
796 bp->duplex = DUPLEX_FULL;
798 else if (common & ADVERTISE_10HALF) {
799 bp->line_speed = SPEED_10;
800 bp->duplex = DUPLEX_HALF;
809 if (bmcr & BMCR_SPEED100) {
810 bp->line_speed = SPEED_100;
813 bp->line_speed = SPEED_10;
815 if (bmcr & BMCR_FULLDPLX) {
816 bp->duplex = DUPLEX_FULL;
819 bp->duplex = DUPLEX_HALF;
827 bnx2_set_mac_link(struct bnx2 *bp)
831 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
832 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
833 (bp->duplex == DUPLEX_HALF)) {
834 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
837 /* Configure the EMAC mode register. */
838 val = REG_RD(bp, BNX2_EMAC_MODE);
840 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
841 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
842 BNX2_EMAC_MODE_25G_MODE);
845 switch (bp->line_speed) {
847 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
848 val |= BNX2_EMAC_MODE_PORT_MII_10M;
853 val |= BNX2_EMAC_MODE_PORT_MII;
856 val |= BNX2_EMAC_MODE_25G_MODE;
859 val |= BNX2_EMAC_MODE_PORT_GMII;
864 val |= BNX2_EMAC_MODE_PORT_GMII;
867 /* Set the MAC to operate in the appropriate duplex mode. */
868 if (bp->duplex == DUPLEX_HALF)
869 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
870 REG_WR(bp, BNX2_EMAC_MODE, val);
872 /* Enable/disable rx PAUSE. */
873 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
875 if (bp->flow_ctrl & FLOW_CTRL_RX)
876 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
877 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
879 /* Enable/disable tx PAUSE. */
880 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
881 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
883 if (bp->flow_ctrl & FLOW_CTRL_TX)
884 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
885 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
887 /* Acknowledge the interrupt. */
888 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
894 bnx2_set_link(struct bnx2 *bp)
899 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
904 link_up = bp->link_up;
906 bnx2_read_phy(bp, MII_BMSR, &bmsr);
907 bnx2_read_phy(bp, MII_BMSR, &bmsr);
909 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
910 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
913 val = REG_RD(bp, BNX2_EMAC_STATUS);
914 if (val & BNX2_EMAC_STATUS_LINK)
915 bmsr |= BMSR_LSTATUS;
917 bmsr &= ~BMSR_LSTATUS;
920 if (bmsr & BMSR_LSTATUS) {
923 if (bp->phy_flags & PHY_SERDES_FLAG) {
924 if (CHIP_NUM(bp) == CHIP_NUM_5706)
925 bnx2_5706s_linkup(bp);
926 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
927 bnx2_5708s_linkup(bp);
930 bnx2_copper_linkup(bp);
932 bnx2_resolve_flow_ctrl(bp);
935 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
936 (bp->autoneg & AUTONEG_SPEED)) {
940 bnx2_read_phy(bp, MII_BMCR, &bmcr);
941 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
942 if (!(bmcr & BMCR_ANENABLE)) {
943 bnx2_write_phy(bp, MII_BMCR, bmcr |
947 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
951 if (bp->link_up != link_up) {
952 bnx2_report_link(bp);
955 bnx2_set_mac_link(bp);
961 bnx2_reset_phy(struct bnx2 *bp)
966 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
968 #define PHY_RESET_MAX_WAIT 100
969 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
972 		bnx2_read_phy(bp, MII_BMCR, &reg);
973 if (!(reg & BMCR_RESET)) {
978 if (i == PHY_RESET_MAX_WAIT) {
985 bnx2_phy_get_pause_adv(struct bnx2 *bp)
989 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
990 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
992 if (bp->phy_flags & PHY_SERDES_FLAG) {
993 adv = ADVERTISE_1000XPAUSE;
996 adv = ADVERTISE_PAUSE_CAP;
999 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1000 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001 adv = ADVERTISE_1000XPSE_ASYM;
1004 adv = ADVERTISE_PAUSE_ASYM;
1007 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1008 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1012 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1019 bnx2_setup_serdes_phy(struct bnx2 *bp)
1024 if (!(bp->autoneg & AUTONEG_SPEED)) {
1026 int force_link_down = 0;
1028 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1029 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1031 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1032 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1033 new_bmcr |= BMCR_SPEED1000;
1034 if (bp->req_line_speed == SPEED_2500) {
1035 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1036 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1037 if (!(up1 & BCM5708S_UP1_2G5)) {
1038 up1 |= BCM5708S_UP1_2G5;
1039 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1040 force_link_down = 1;
1042 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1043 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1044 if (up1 & BCM5708S_UP1_2G5) {
1045 up1 &= ~BCM5708S_UP1_2G5;
1046 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1047 force_link_down = 1;
1051 if (bp->req_duplex == DUPLEX_FULL) {
1052 adv |= ADVERTISE_1000XFULL;
1053 new_bmcr |= BMCR_FULLDPLX;
1056 adv |= ADVERTISE_1000XHALF;
1057 new_bmcr &= ~BMCR_FULLDPLX;
1059 if ((new_bmcr != bmcr) || (force_link_down)) {
1060 /* Force a link down visible on the other side */
1062 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1063 ~(ADVERTISE_1000XFULL |
1064 ADVERTISE_1000XHALF));
1065 bnx2_write_phy(bp, MII_BMCR, bmcr |
1066 BMCR_ANRESTART | BMCR_ANENABLE);
1069 netif_carrier_off(bp->dev);
1070 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1071 bnx2_report_link(bp);
1073 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1074 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1079 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1080 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1081 up1 |= BCM5708S_UP1_2G5;
1082 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1085 if (bp->advertising & ADVERTISED_1000baseT_Full)
1086 new_adv |= ADVERTISE_1000XFULL;
1088 new_adv |= bnx2_phy_get_pause_adv(bp);
1090 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1091 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1093 bp->serdes_an_pending = 0;
1094 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1095 /* Force a link down visible on the other side */
1097 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1098 spin_unlock_bh(&bp->phy_lock);
1100 spin_lock_bh(&bp->phy_lock);
1103 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1104 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1106 /* Speed up link-up time when the link partner
1107 		 * does not autonegotiate, which is very common
1108 		 * in blade servers. Some blade servers use
1109 		 * IPMI for keyboard input and it's important
1110 * to minimize link disruptions. Autoneg. involves
1111 * exchanging base pages plus 3 next pages and
1112 * normally completes in about 120 msec.
1114 bp->current_interval = SERDES_AN_TIMEOUT;
1115 bp->serdes_an_pending = 1;
1116 mod_timer(&bp->timer, jiffies + bp->current_interval);
1122 #define ETHTOOL_ALL_FIBRE_SPEED \
1123 (ADVERTISED_1000baseT_Full)
1125 #define ETHTOOL_ALL_COPPER_SPEED \
1126 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1127 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1128 ADVERTISED_1000baseT_Full)
1130 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1131 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1133 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1136 bnx2_setup_copper_phy(struct bnx2 *bp)
1141 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1143 if (bp->autoneg & AUTONEG_SPEED) {
1144 u32 adv_reg, adv1000_reg;
1145 u32 new_adv_reg = 0;
1146 u32 new_adv1000_reg = 0;
1148 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1149 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1150 ADVERTISE_PAUSE_ASYM);
1152 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1153 adv1000_reg &= PHY_ALL_1000_SPEED;
1155 if (bp->advertising & ADVERTISED_10baseT_Half)
1156 new_adv_reg |= ADVERTISE_10HALF;
1157 if (bp->advertising & ADVERTISED_10baseT_Full)
1158 new_adv_reg |= ADVERTISE_10FULL;
1159 if (bp->advertising & ADVERTISED_100baseT_Half)
1160 new_adv_reg |= ADVERTISE_100HALF;
1161 if (bp->advertising & ADVERTISED_100baseT_Full)
1162 new_adv_reg |= ADVERTISE_100FULL;
1163 if (bp->advertising & ADVERTISED_1000baseT_Full)
1164 new_adv1000_reg |= ADVERTISE_1000FULL;
1166 new_adv_reg |= ADVERTISE_CSMA;
1168 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1170 if ((adv1000_reg != new_adv1000_reg) ||
1171 (adv_reg != new_adv_reg) ||
1172 ((bmcr & BMCR_ANENABLE) == 0)) {
1174 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1175 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1176 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1179 else if (bp->link_up) {
1180 /* Flow ctrl may have changed from auto to forced */
1181 /* or vice-versa. */
1183 bnx2_resolve_flow_ctrl(bp);
1184 bnx2_set_mac_link(bp);
1190 if (bp->req_line_speed == SPEED_100) {
1191 new_bmcr |= BMCR_SPEED100;
1193 if (bp->req_duplex == DUPLEX_FULL) {
1194 new_bmcr |= BMCR_FULLDPLX;
1196 if (new_bmcr != bmcr) {
1199 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1200 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1202 if (bmsr & BMSR_LSTATUS) {
1203 /* Force link down */
1204 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1205 spin_unlock_bh(&bp->phy_lock);
1207 spin_lock_bh(&bp->phy_lock);
1209 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1210 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1213 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1215 		/* Normally, the new speed is set up after the link has
1216 		 * gone down and up again. In some cases, the link will not go
1217 * down so we need to set up the new speed here.
1219 if (bmsr & BMSR_LSTATUS) {
1220 bp->line_speed = bp->req_line_speed;
1221 bp->duplex = bp->req_duplex;
1222 bnx2_resolve_flow_ctrl(bp);
1223 bnx2_set_mac_link(bp);
1230 bnx2_setup_phy(struct bnx2 *bp)
1232 if (bp->loopback == MAC_LOOPBACK)
1235 if (bp->phy_flags & PHY_SERDES_FLAG) {
1236 return (bnx2_setup_serdes_phy(bp));
1239 return (bnx2_setup_copper_phy(bp));
1244 bnx2_init_5708s_phy(struct bnx2 *bp)
1248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1249 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1250 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1252 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1253 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1254 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1256 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1257 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1258 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1260 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1261 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1262 val |= BCM5708S_UP1_2G5;
1263 bnx2_write_phy(bp, BCM5708S_UP1, val);
1266 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1267 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1268 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1269 /* increase tx signal amplitude */
1270 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1271 BCM5708S_BLK_ADDR_TX_MISC);
1272 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1273 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1274 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1275 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1278 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1279 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1284 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1285 BNX2_SHARED_HW_CFG_CONFIG);
1286 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1287 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1288 BCM5708S_BLK_ADDR_TX_MISC);
1289 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1290 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1291 BCM5708S_BLK_ADDR_DIG);
1298 bnx2_init_5706s_phy(struct bnx2 *bp)
1300 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1302 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1303 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1305 if (bp->dev->mtu > 1500) {
1308 /* Set extended packet length bit */
1309 bnx2_write_phy(bp, 0x18, 0x7);
1310 bnx2_read_phy(bp, 0x18, &val);
1311 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1313 bnx2_write_phy(bp, 0x1c, 0x6c00);
1314 bnx2_read_phy(bp, 0x1c, &val);
1315 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1320 bnx2_write_phy(bp, 0x18, 0x7);
1321 bnx2_read_phy(bp, 0x18, &val);
1322 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1324 bnx2_write_phy(bp, 0x1c, 0x6c00);
1325 bnx2_read_phy(bp, 0x1c, &val);
1326 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1333 bnx2_init_copper_phy(struct bnx2 *bp)
1337 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1339 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1340 bnx2_write_phy(bp, 0x18, 0x0c00);
1341 bnx2_write_phy(bp, 0x17, 0x000a);
1342 bnx2_write_phy(bp, 0x15, 0x310b);
1343 bnx2_write_phy(bp, 0x17, 0x201f);
1344 bnx2_write_phy(bp, 0x15, 0x9506);
1345 bnx2_write_phy(bp, 0x17, 0x401f);
1346 bnx2_write_phy(bp, 0x15, 0x14e2);
1347 bnx2_write_phy(bp, 0x18, 0x0400);
1350 if (bp->dev->mtu > 1500) {
1351 /* Set extended packet length bit */
1352 bnx2_write_phy(bp, 0x18, 0x7);
1353 bnx2_read_phy(bp, 0x18, &val);
1354 bnx2_write_phy(bp, 0x18, val | 0x4000);
1356 bnx2_read_phy(bp, 0x10, &val);
1357 bnx2_write_phy(bp, 0x10, val | 0x1);
1360 bnx2_write_phy(bp, 0x18, 0x7);
1361 bnx2_read_phy(bp, 0x18, &val);
1362 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1364 bnx2_read_phy(bp, 0x10, &val);
1365 bnx2_write_phy(bp, 0x10, val & ~0x1);
1368 /* ethernet@wirespeed */
1369 bnx2_write_phy(bp, 0x18, 0x7007);
1370 bnx2_read_phy(bp, 0x18, &val);
1371 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1377 bnx2_init_phy(struct bnx2 *bp)
1382 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1383 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1385 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1389 bnx2_read_phy(bp, MII_PHYSID1, &val);
1390 bp->phy_id = val << 16;
1391 bnx2_read_phy(bp, MII_PHYSID2, &val);
1392 bp->phy_id |= val & 0xffff;
1394 if (bp->phy_flags & PHY_SERDES_FLAG) {
1395 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1396 rc = bnx2_init_5706s_phy(bp);
1397 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1398 rc = bnx2_init_5708s_phy(bp);
1401 rc = bnx2_init_copper_phy(bp);
1410 bnx2_set_mac_loopback(struct bnx2 *bp)
1414 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1415 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1416 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1417 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1422 static int bnx2_test_link(struct bnx2 *);
1425 bnx2_set_phy_loopback(struct bnx2 *bp)
1430 spin_lock_bh(&bp->phy_lock);
1431 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1433 spin_unlock_bh(&bp->phy_lock);
1437 for (i = 0; i < 10; i++) {
1438 if (bnx2_test_link(bp) == 0)
1443 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1444 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1445 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1446 BNX2_EMAC_MODE_25G_MODE);
1448 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1449 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1455 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1461 msg_data |= bp->fw_wr_seq;
1463 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1465 /* wait for an acknowledgement. */
1466 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1469 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1471 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1474 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1477 /* If we timed out, inform the firmware that this is the case. */
1478 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1480 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1483 msg_data &= ~BNX2_DRV_MSG_CODE;
1484 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1486 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1491 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1498 bnx2_init_5709_context(struct bnx2 *bp)
1503 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1504 val |= (BCM_PAGE_BITS - 8) << 16;
1505 REG_WR(bp, BNX2_CTX_COMMAND, val);
1506 for (i = 0; i < bp->ctx_pages; i++) {
1509 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1510 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1511 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1512 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1513 (u64) bp->ctx_blk_mapping[i] >> 32);
1514 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1515 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1516 for (j = 0; j < 10; j++) {
1518 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1519 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1523 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1532 bnx2_init_context(struct bnx2 *bp)
1538 u32 vcid_addr, pcid_addr, offset;
1542 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1545 vcid_addr = GET_PCID_ADDR(vcid);
1547 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1552 pcid_addr = GET_PCID_ADDR(new_vcid);
1555 vcid_addr = GET_CID_ADDR(vcid);
1556 pcid_addr = vcid_addr;
1559 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1560 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1562 /* Zero out the context. */
1563 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1564 CTX_WR(bp, 0x00, offset, 0);
1567 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1568 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1573 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1579 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1580 if (good_mbuf == NULL) {
1581 printk(KERN_ERR PFX "Failed to allocate memory in "
1582 "bnx2_alloc_bad_rbuf\n");
1586 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1587 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1591 /* Allocate a bunch of mbufs and save the good ones in an array. */
1592 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1593 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1594 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1596 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1598 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1600 /* The addresses with Bit 9 set are bad memory blocks. */
1601 if (!(val & (1 << 9))) {
1602 good_mbuf[good_mbuf_cnt] = (u16) val;
1606 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1609 	/* Free the good ones back to the mbuf pool, thus discarding
1610 * all the bad ones. */
1611 while (good_mbuf_cnt) {
1614 val = good_mbuf[good_mbuf_cnt];
1615 val = (val << 9) | val | 1;
1617 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1624 bnx2_set_mac_addr(struct bnx2 *bp)
1627 u8 *mac_addr = bp->dev->dev_addr;
1629 val = (mac_addr[0] << 8) | mac_addr[1];
1631 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1633 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1634 (mac_addr[4] << 8) | mac_addr[5];
1636 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1640 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1642 struct sk_buff *skb;
1643 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1645 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1646 unsigned long align;
1648 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1653 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1654 skb_reserve(skb, BNX2_RX_ALIGN - align);
1656 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1657 PCI_DMA_FROMDEVICE);
1660 pci_unmap_addr_set(rx_buf, mapping, mapping);
1662 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1663 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1665 bp->rx_prod_bseq += bp->rx_buf_use_size;
1671 bnx2_phy_int(struct bnx2 *bp)
1673 u32 new_link_state, old_link_state;
1675 new_link_state = bp->status_blk->status_attn_bits &
1676 STATUS_ATTN_BITS_LINK_STATE;
1677 old_link_state = bp->status_blk->status_attn_bits_ack &
1678 STATUS_ATTN_BITS_LINK_STATE;
1679 if (new_link_state != old_link_state) {
1680 if (new_link_state) {
1681 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1682 STATUS_ATTN_BITS_LINK_STATE);
1685 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1686 STATUS_ATTN_BITS_LINK_STATE);
1693 bnx2_tx_int(struct bnx2 *bp)
1695 struct status_block *sblk = bp->status_blk;
1696 u16 hw_cons, sw_cons, sw_ring_cons;
1699 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1700 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1703 sw_cons = bp->tx_cons;
1705 while (sw_cons != hw_cons) {
1706 struct sw_bd *tx_buf;
1707 struct sk_buff *skb;
1710 sw_ring_cons = TX_RING_IDX(sw_cons);
1712 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1715 /* partial BD completions possible with TSO packets */
1716 if (skb_is_gso(skb)) {
1717 u16 last_idx, last_ring_idx;
1719 last_idx = sw_cons +
1720 skb_shinfo(skb)->nr_frags + 1;
1721 last_ring_idx = sw_ring_cons +
1722 skb_shinfo(skb)->nr_frags + 1;
1723 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1726 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1731 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1732 skb_headlen(skb), PCI_DMA_TODEVICE);
1735 last = skb_shinfo(skb)->nr_frags;
1737 for (i = 0; i < last; i++) {
1738 sw_cons = NEXT_TX_BD(sw_cons);
1740 pci_unmap_page(bp->pdev,
1742 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1744 skb_shinfo(skb)->frags[i].size,
1748 sw_cons = NEXT_TX_BD(sw_cons);
1750 tx_free_bd += last + 1;
1754 hw_cons = bp->hw_tx_cons =
1755 sblk->status_tx_quick_consumer_index0;
1757 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1762 bp->tx_cons = sw_cons;
1763 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1764 * before checking for netif_queue_stopped(). Without the
1765 * memory barrier, there is a small possibility that bnx2_start_xmit()
1766 * will miss it and cause the queue to be stopped forever.
1770 if (unlikely(netif_queue_stopped(bp->dev)) &&
1771 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1772 netif_tx_lock(bp->dev);
1773 if ((netif_queue_stopped(bp->dev)) &&
1774 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1775 netif_wake_queue(bp->dev);
1776 netif_tx_unlock(bp->dev);
1781 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1784 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1785 struct rx_bd *cons_bd, *prod_bd;
1787 cons_rx_buf = &bp->rx_buf_ring[cons];
1788 prod_rx_buf = &bp->rx_buf_ring[prod];
1790 pci_dma_sync_single_for_device(bp->pdev,
1791 pci_unmap_addr(cons_rx_buf, mapping),
1792 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1794 bp->rx_prod_bseq += bp->rx_buf_use_size;
1796 prod_rx_buf->skb = skb;
1801 pci_unmap_addr_set(prod_rx_buf, mapping,
1802 pci_unmap_addr(cons_rx_buf, mapping));
1804 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1805 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1806 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1807 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1811 bnx2_rx_int(struct bnx2 *bp, int budget)
1813 struct status_block *sblk = bp->status_blk;
1814 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1815 struct l2_fhdr *rx_hdr;
1818 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1819 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1822 sw_cons = bp->rx_cons;
1823 sw_prod = bp->rx_prod;
1825 /* Memory barrier necessary as speculative reads of the rx
1826 * buffer can be ahead of the index in the status block
1829 while (sw_cons != hw_cons) {
1832 struct sw_bd *rx_buf;
1833 struct sk_buff *skb;
1834 dma_addr_t dma_addr;
1836 sw_ring_cons = RX_RING_IDX(sw_cons);
1837 sw_ring_prod = RX_RING_IDX(sw_prod);
1839 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1844 dma_addr = pci_unmap_addr(rx_buf, mapping);
1846 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1847 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1849 rx_hdr = (struct l2_fhdr *) skb->data;
1850 len = rx_hdr->l2_fhdr_pkt_len - 4;
1852 if ((status = rx_hdr->l2_fhdr_status) &
1853 (L2_FHDR_ERRORS_BAD_CRC |
1854 L2_FHDR_ERRORS_PHY_DECODE |
1855 L2_FHDR_ERRORS_ALIGNMENT |
1856 L2_FHDR_ERRORS_TOO_SHORT |
1857 L2_FHDR_ERRORS_GIANT_FRAME)) {
1862 /* Since we don't have a jumbo ring, copy small packets
1865 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1866 struct sk_buff *new_skb;
1868 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1869 if (new_skb == NULL)
1873 memcpy(new_skb->data,
1874 skb->data + bp->rx_offset - 2,
1877 skb_reserve(new_skb, 2);
1878 skb_put(new_skb, len);
1880 bnx2_reuse_rx_skb(bp, skb,
1881 sw_ring_cons, sw_ring_prod);
1885 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1886 pci_unmap_single(bp->pdev, dma_addr,
1887 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1889 skb_reserve(skb, bp->rx_offset);
1894 bnx2_reuse_rx_skb(bp, skb,
1895 sw_ring_cons, sw_ring_prod);
1899 skb->protocol = eth_type_trans(skb, bp->dev);
1901 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1902 (ntohs(skb->protocol) != 0x8100)) {
1909 skb->ip_summed = CHECKSUM_NONE;
1911 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1912 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1914 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1915 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1916 skb->ip_summed = CHECKSUM_UNNECESSARY;
1920 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1921 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1922 rx_hdr->l2_fhdr_vlan_tag);
1926 netif_receive_skb(skb);
1928 bp->dev->last_rx = jiffies;
1932 sw_cons = NEXT_RX_BD(sw_cons);
1933 sw_prod = NEXT_RX_BD(sw_prod);
1935 if ((rx_pkt == budget))
1938 /* Refresh hw_cons to see if there is new work */
1939 if (sw_cons == hw_cons) {
1940 hw_cons = bp->hw_rx_cons =
1941 sblk->status_rx_quick_consumer_index0;
1942 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1947 bp->rx_cons = sw_cons;
1948 bp->rx_prod = sw_prod;
1950 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1952 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1960 /* MSI ISR - The only difference between this and the INTx ISR
1961 * is that the MSI interrupt is always serviced.
1964 bnx2_msi(int irq, void *dev_instance)
1966 struct net_device *dev = dev_instance;
1967 struct bnx2 *bp = netdev_priv(dev);
1969 prefetch(bp->status_blk);
1970 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1971 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1972 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1974 /* Return here if interrupt is disabled. */
1975 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1978 netif_rx_schedule(dev);
1984 bnx2_interrupt(int irq, void *dev_instance)
1986 struct net_device *dev = dev_instance;
1987 struct bnx2 *bp = netdev_priv(dev);
1989 /* When using INTx, it is possible for the interrupt to arrive
1990 * at the CPU before the status block posted prior to the
1991 * interrupt. Reading a register will flush the status block.
1992 * When using MSI, the MSI message will always complete after
1993 * the status block write.
1995 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1996 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1997 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2000 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2001 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2002 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2004 /* Return here if interrupt is shared and is disabled. */
2005 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2008 netif_rx_schedule(dev);
2014 bnx2_has_work(struct bnx2 *bp)
2016 struct status_block *sblk = bp->status_blk;
2018 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2019 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2022 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2030 bnx2_poll(struct net_device *dev, int *budget)
2032 struct bnx2 *bp = netdev_priv(dev);
2034 if ((bp->status_blk->status_attn_bits &
2035 STATUS_ATTN_BITS_LINK_STATE) !=
2036 (bp->status_blk->status_attn_bits_ack &
2037 STATUS_ATTN_BITS_LINK_STATE)) {
2039 spin_lock(&bp->phy_lock);
2041 spin_unlock(&bp->phy_lock);
2043 /* This is needed to take care of transient status
2044 * during link changes.
2046 REG_WR(bp, BNX2_HC_COMMAND,
2047 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2048 REG_RD(bp, BNX2_HC_COMMAND);
2051 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2054 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2055 int orig_budget = *budget;
2058 if (orig_budget > dev->quota)
2059 orig_budget = dev->quota;
2061 work_done = bnx2_rx_int(bp, orig_budget);
2062 *budget -= work_done;
2063 dev->quota -= work_done;
2066 bp->last_status_idx = bp->status_blk->status_idx;
2069 if (!bnx2_has_work(bp)) {
2070 netif_rx_complete(dev);
2071 if (likely(bp->flags & USING_MSI_FLAG)) {
2072 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2073 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2074 bp->last_status_idx);
2077 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2078 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2079 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2080 bp->last_status_idx);
2082 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2083 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2084 bp->last_status_idx);
2091 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2092 * from set_multicast.
2095 bnx2_set_rx_mode(struct net_device *dev)
2097 struct bnx2 *bp = netdev_priv(dev);
2098 u32 rx_mode, sort_mode;
2101 spin_lock_bh(&bp->phy_lock);
2103 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2104 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2105 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2107 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2108 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2110 if (!(bp->flags & ASF_ENABLE_FLAG))
2111 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2113 if (dev->flags & IFF_PROMISC) {
2114 /* Promiscuous mode. */
2115 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2116 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2117 BNX2_RPM_SORT_USER0_PROM_VLAN;
2119 else if (dev->flags & IFF_ALLMULTI) {
2120 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2121 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2124 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2127 		/* Accept one or more multicast addresses. */
2128 struct dev_mc_list *mclist;
2129 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2134 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2136 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2137 i++, mclist = mclist->next) {
2139 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2141 regidx = (bit & 0xe0) >> 5;
2143 mc_filter[regidx] |= (1 << bit);
2146 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2147 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2151 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2154 if (rx_mode != bp->rx_mode) {
2155 bp->rx_mode = rx_mode;
2156 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2159 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2160 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2161 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2163 spin_unlock_bh(&bp->phy_lock);
2166 #define FW_BUF_SIZE 0x8000
2169 bnx2_gunzip_init(struct bnx2 *bp)
2171 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2174 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2177 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2178 if (bp->strm->workspace == NULL)
2188 vfree(bp->gunzip_buf);
2189 bp->gunzip_buf = NULL;
2192 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2193 "uncompression.\n", bp->dev->name);
2198 bnx2_gunzip_end(struct bnx2 *bp)
2200 kfree(bp->strm->workspace);
2205 if (bp->gunzip_buf) {
2206 vfree(bp->gunzip_buf);
2207 bp->gunzip_buf = NULL;
2212 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2216 /* check gzip header */
2217 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2223 if (zbuf[3] & FNAME)
2224 while ((zbuf[n++] != 0) && (n < len));
2226 bp->strm->next_in = zbuf + n;
2227 bp->strm->avail_in = len - n;
2228 bp->strm->next_out = bp->gunzip_buf;
2229 bp->strm->avail_out = FW_BUF_SIZE;
2231 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2235 rc = zlib_inflate(bp->strm, Z_FINISH);
2237 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2238 *outbuf = bp->gunzip_buf;
2240 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2241 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2242 bp->dev->name, bp->strm->msg);
2244 zlib_inflateEnd(bp->strm);
2246 if (rc == Z_STREAM_END)
2253 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2260 for (i = 0; i < rv2p_code_len; i += 8) {
2261 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2263 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2266 if (rv2p_proc == RV2P_PROC1) {
2267 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2268 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2271 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2272 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2276 /* Reset the processor, un-stall is done later. */
2277 if (rv2p_proc == RV2P_PROC1) {
2278 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2281 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2286 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2293 val = REG_RD_IND(bp, cpu_reg->mode);
2294 val |= cpu_reg->mode_value_halt;
2295 REG_WR_IND(bp, cpu_reg->mode, val);
2296 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2298 /* Load the Text area. */
2299 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2304 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2314 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2315 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2319 /* Load the Data area. */
2320 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2324 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2325 REG_WR_IND(bp, offset, fw->data[j]);
2329 /* Load the SBSS area. */
2330 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2334 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2335 REG_WR_IND(bp, offset, fw->sbss[j]);
2339 /* Load the BSS area. */
2340 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2344 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2345 REG_WR_IND(bp, offset, fw->bss[j]);
2349 /* Load the Read-Only area. */
2350 offset = cpu_reg->spad_base +
2351 (fw->rodata_addr - cpu_reg->mips_view_base);
2355 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2356 REG_WR_IND(bp, offset, fw->rodata[j]);
2360 /* Clear the pre-fetch instruction. */
2361 REG_WR_IND(bp, cpu_reg->inst, 0);
2362 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2364 /* Start the CPU. */
2365 val = REG_RD_IND(bp, cpu_reg->mode);
2366 val &= ~cpu_reg->mode_value_halt;
2367 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2368 REG_WR_IND(bp, cpu_reg->mode, val);
2374 bnx2_init_cpus(struct bnx2 *bp)
2376 struct cpu_reg cpu_reg;
2382 if ((rc = bnx2_gunzip_init(bp)) != 0)
2385 /* Initialize the RV2P processor. */
2386 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2391 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2393 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2398 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2400 /* Initialize the RX Processor. */
2401 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2402 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2403 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2404 cpu_reg.state = BNX2_RXP_CPU_STATE;
2405 cpu_reg.state_value_clear = 0xffffff;
2406 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2407 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2408 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2409 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2410 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2411 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2412 cpu_reg.mips_view_base = 0x8000000;
2414 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2415 fw = &bnx2_rxp_fw_09;
2417 fw = &bnx2_rxp_fw_06;
2419 rc = load_cpu_fw(bp, &cpu_reg, fw);
2423 /* Initialize the TX Processor. */
2424 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2425 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2426 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2427 cpu_reg.state = BNX2_TXP_CPU_STATE;
2428 cpu_reg.state_value_clear = 0xffffff;
2429 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2430 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2431 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2432 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2433 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2434 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2435 cpu_reg.mips_view_base = 0x8000000;
2437 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2438 fw = &bnx2_txp_fw_09;
2440 fw = &bnx2_txp_fw_06;
2442 rc = load_cpu_fw(bp, &cpu_reg, fw);
2446 /* Initialize the TX Patch-up Processor. */
2447 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2448 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2449 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2450 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2451 cpu_reg.state_value_clear = 0xffffff;
2452 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2453 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2454 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2455 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2456 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2457 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2458 cpu_reg.mips_view_base = 0x8000000;
2460 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2461 fw = &bnx2_tpat_fw_09;
2463 fw = &bnx2_tpat_fw_06;
2465 rc = load_cpu_fw(bp, &cpu_reg, fw);
2469 /* Initialize the Completion Processor. */
2470 cpu_reg.mode = BNX2_COM_CPU_MODE;
2471 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2472 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2473 cpu_reg.state = BNX2_COM_CPU_STATE;
2474 cpu_reg.state_value_clear = 0xffffff;
2475 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2476 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2477 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2478 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2479 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2480 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2481 cpu_reg.mips_view_base = 0x8000000;
2483 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2484 fw = &bnx2_com_fw_09;
2486 fw = &bnx2_com_fw_06;
2488 rc = load_cpu_fw(bp, &cpu_reg, fw);
2492 /* Initialize the Command Processor. */
2493 cpu_reg.mode = BNX2_CP_CPU_MODE;
2494 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2495 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2496 cpu_reg.state = BNX2_CP_CPU_STATE;
2497 cpu_reg.state_value_clear = 0xffffff;
2498 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2499 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2500 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2501 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2502 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2503 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2504 cpu_reg.mips_view_base = 0x8000000;
2506 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2507 fw = &bnx2_cp_fw_09;
2509 load_cpu_fw(bp, &cpu_reg, fw);
2514 bnx2_gunzip_end(bp);
2519 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2523 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2529 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2530 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2531 PCI_PM_CTRL_PME_STATUS);
2533 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2534 /* delay required during transition out of D3hot */
2537 val = REG_RD(bp, BNX2_EMAC_MODE);
2538 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2539 val &= ~BNX2_EMAC_MODE_MPKT;
2540 REG_WR(bp, BNX2_EMAC_MODE, val);
2542 val = REG_RD(bp, BNX2_RPM_CONFIG);
2543 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2544 REG_WR(bp, BNX2_RPM_CONFIG, val);
2555 autoneg = bp->autoneg;
2556 advertising = bp->advertising;
2558 bp->autoneg = AUTONEG_SPEED;
2559 bp->advertising = ADVERTISED_10baseT_Half |
2560 ADVERTISED_10baseT_Full |
2561 ADVERTISED_100baseT_Half |
2562 ADVERTISED_100baseT_Full |
2565 bnx2_setup_copper_phy(bp);
2567 bp->autoneg = autoneg;
2568 bp->advertising = advertising;
2570 bnx2_set_mac_addr(bp);
2572 val = REG_RD(bp, BNX2_EMAC_MODE);
2574 /* Enable port mode. */
2575 val &= ~BNX2_EMAC_MODE_PORT;
2576 val |= BNX2_EMAC_MODE_PORT_MII |
2577 BNX2_EMAC_MODE_MPKT_RCVD |
2578 BNX2_EMAC_MODE_ACPI_RCVD |
2579 BNX2_EMAC_MODE_MPKT;
2581 REG_WR(bp, BNX2_EMAC_MODE, val);
2583 /* receive all multicast */
2584 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2585 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2588 REG_WR(bp, BNX2_EMAC_RX_MODE,
2589 BNX2_EMAC_RX_MODE_SORT_MODE);
2591 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2592 BNX2_RPM_SORT_USER0_MC_EN;
2593 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2594 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2595 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2596 BNX2_RPM_SORT_USER0_ENA);
2598 /* Need to enable EMAC and RPM for WOL. */
2599 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2600 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2601 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2602 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2604 val = REG_RD(bp, BNX2_RPM_CONFIG);
2605 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2606 REG_WR(bp, BNX2_RPM_CONFIG, val);
2608 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2611 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2614 if (!(bp->flags & NO_WOL_FLAG))
2615 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2617 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2618 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2619 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2628 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2630 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2633 /* No more memory access after this point until
2634 * the device is brought back to D0.
2635 */
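/*
 * NVRAM arbitration: the flash interface is shared, so every access is
 * bracketed by an arbitration lock.  bnx2_acquire_nvram_lock() requests
 * arbitration slot 2 and polls BNX2_NVM_SW_ARB until the grant bit is set
 * or NVRAM_TIMEOUT_COUNT iterations expire; bnx2_release_nvram_lock()
 * clears the request and waits for the grant bit to drop.
 */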
2646 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2651 /* Request access to the flash interface. */
2652 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2653 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2654 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2655 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2661 if (j >= NVRAM_TIMEOUT_COUNT)
2668 bnx2_release_nvram_lock(struct bnx2 *bp)
2673 /* Relinquish nvram interface. */
2674 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2676 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2677 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2678 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2684 if (j >= NVRAM_TIMEOUT_COUNT)
2692 bnx2_enable_nvram_write(struct bnx2 *bp)
2696 val = REG_RD(bp, BNX2_MISC_CFG);
2697 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2699 if (!bp->flash_info->buffered) {
2702 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2703 REG_WR(bp, BNX2_NVM_COMMAND,
2704 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2706 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2709 val = REG_RD(bp, BNX2_NVM_COMMAND);
2710 if (val & BNX2_NVM_COMMAND_DONE)
2714 if (j >= NVRAM_TIMEOUT_COUNT)
2721 bnx2_disable_nvram_write(struct bnx2 *bp)
2725 val = REG_RD(bp, BNX2_MISC_CFG);
2726 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2731 bnx2_enable_nvram_access(struct bnx2 *bp)
2735 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2736 /* Enable both bits, even on read. */
2737 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2738 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2742 bnx2_disable_nvram_access(struct bnx2 *bp)
2746 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2747 /* Disable both bits, even after read. */
2748 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2749 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2750 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2754 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2759 if (bp->flash_info->buffered)
2760 /* Buffered flash, no erase needed */
2763 /* Build an erase command */
2764 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2765 BNX2_NVM_COMMAND_DOIT;
2767 /* Need to clear DONE bit separately. */
2768 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2770 /* Address of the NVRAM page to erase. */
2771 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2773 /* Issue an erase command. */
2774 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2776 /* Wait for completion. */
2777 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2782 val = REG_RD(bp, BNX2_NVM_COMMAND);
2783 if (val & BNX2_NVM_COMMAND_DONE)
2787 if (j >= NVRAM_TIMEOUT_COUNT)
2794 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2799 /* Build the command word. */
2800 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2802 /* Convert the linear offset to the page-addressed format used by buffered flash. */
2803 if (bp->flash_info->buffered) {
2804 offset = ((offset / bp->flash_info->page_size) <<
2805 bp->flash_info->page_bits) +
2806 (offset % bp->flash_info->page_size);
2809 /* Need to clear DONE bit separately. */
2810 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2812 /* Address of the NVRAM to read from. */
2813 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2815 /* Issue a read command. */
2816 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2818 /* Wait for completion. */
2819 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2824 val = REG_RD(bp, BNX2_NVM_COMMAND);
2825 if (val & BNX2_NVM_COMMAND_DONE) {
2826 val = REG_RD(bp, BNX2_NVM_READ);
2828 val = be32_to_cpu(val);
2829 memcpy(ret_val, &val, 4);
2833 if (j >= NVRAM_TIMEOUT_COUNT)
2841 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2846 /* Build the command word. */
2847 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2849 /* Convert the linear offset to the page-addressed format used by buffered flash. */
2850 if (bp->flash_info->buffered) {
2851 offset = ((offset / bp->flash_info->page_size) <<
2852 bp->flash_info->page_bits) +
2853 (offset % bp->flash_info->page_size);
2856 /* Need to clear DONE bit separately. */
2857 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2859 memcpy(&val32, val, 4);
2860 val32 = cpu_to_be32(val32);
2862 /* Write the data. */
2863 REG_WR(bp, BNX2_NVM_WRITE, val32);
2865 /* Address of the NVRAM to write to. */
2866 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2868 /* Issue the write command. */
2869 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2871 /* Wait for completion. */
2872 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2875 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2878 if (j >= NVRAM_TIMEOUT_COUNT)
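/*
 * bnx2_init_nvram() - identify the attached flash or EEPROM.  The NVM_CFG1
 * strapping (or the backup strapping when the interface has already been
 * reconfigured) is matched against flash_table[]; on a match for a part that
 * has not been reconfigured yet, NVM_CFG1-3 and NVM_WRITE1 are programmed
 * from the table entry under the NVRAM lock.  The usable flash size is taken
 * from the shared-memory configuration when it reports one, otherwise from
 * the table entry's total_size.
 */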
2885 bnx2_init_nvram(struct bnx2 *bp)
2888 int j, entry_count, rc;
2889 struct flash_spec *flash;
2891 /* Determine the selected interface. */
2892 val = REG_RD(bp, BNX2_NVM_CFG1);
2894 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2897 if (val & 0x40000000) {
2899 /* Flash interface has been reconfigured */
2900 for (j = 0, flash = &flash_table[0]; j < entry_count;
2902 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2903 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2904 bp->flash_info = flash;
2911 /* Not yet been reconfigured */
2913 if (val & (1 << 23))
2914 mask = FLASH_BACKUP_STRAP_MASK;
2916 mask = FLASH_STRAP_MASK;
2918 for (j = 0, flash = &flash_table[0]; j < entry_count;
2921 if ((val & mask) == (flash->strapping & mask)) {
2922 bp->flash_info = flash;
2924 /* Request access to the flash interface. */
2925 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2928 /* Enable access to flash interface */
2929 bnx2_enable_nvram_access(bp);
2931 /* Reconfigure the flash interface */
2932 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2933 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2934 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2935 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2937 /* Disable access to flash interface */
2938 bnx2_disable_nvram_access(bp);
2939 bnx2_release_nvram_lock(bp);
2944 } /* if (val & 0x40000000) */
2946 if (j == entry_count) {
2947 bp->flash_info = NULL;
2948 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2952 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2953 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2955 bp->flash_size = val;
2957 bp->flash_size = bp->flash_info->total_size;
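/*
 * bnx2_nvram_read() - read an arbitrary byte range from NVRAM.  The range is
 * widened to dword boundaries: a leading partial dword (pre_len), whole
 * dwords in the middle, and a trailing partial dword (extra) are fetched
 * with the FIRST/LAST command flags marking the edges of the burst, and only
 * the requested bytes are copied into ret_buf.  For example, a 5-byte read
 * at offset 6 becomes a FIRST read of the dword at 4 (supplying bytes 6-7)
 * followed by a LAST read of the dword at 8 (supplying bytes 8-10).
 */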
2963 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2967 u32 cmd_flags, offset32, len32, extra;
2972 /* Request access to the flash interface. */
2973 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2976 /* Enable access to flash interface */
2977 bnx2_enable_nvram_access(bp);
2990 pre_len = 4 - (offset & 3);
2992 if (pre_len >= len32) {
2994 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2995 BNX2_NVM_COMMAND_LAST;
2998 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3001 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3006 memcpy(ret_buf, buf + (offset & 3), pre_len);
3013 extra = 4 - (len32 & 3);
3014 len32 = (len32 + 4) & ~3;
3021 cmd_flags = BNX2_NVM_COMMAND_LAST;
3023 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3024 BNX2_NVM_COMMAND_LAST;
3026 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3028 memcpy(ret_buf, buf, 4 - extra);
3030 else if (len32 > 0) {
3033 /* Read the first word. */
3037 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3039 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3041 /* Advance to the next dword. */
3046 while (len32 > 4 && rc == 0) {
3047 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3049 /* Advance to the next dword. */
3058 cmd_flags = BNX2_NVM_COMMAND_LAST;
3059 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3061 memcpy(ret_buf, buf, 4 - extra);
3064 /* Disable access to flash interface */
3065 bnx2_disable_nvram_access(bp);
3067 bnx2_release_nvram_lock(bp);
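/*
 * bnx2_nvram_write() - read-modify-write an arbitrary byte range in NVRAM.
 * Unaligned head and tail bytes are read back first so that only whole
 * dwords are programmed.  The data is then written one flash page at a time:
 * on non-buffered parts the existing page is read into a scratch buffer, the
 * page is erased, and the untouched leading/trailing dwords are re-written
 * around the new data; buffered parts are written in place.  The NVRAM lock
 * and write enable are acquired and released around every page.
 */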
3073 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3076 u32 written, offset32, len32;
3077 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3079 int align_start, align_end;
3084 align_start = align_end = 0;
3086 if ((align_start = (offset32 & 3))) {
3088 len32 += align_start;
3089 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3094 if ((len32 > 4) || !align_start) {
3095 align_end = 4 - (len32 & 3);
3097 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3104 if (align_start || align_end) {
3105 buf = kmalloc(len32, GFP_KERNEL);
3109 memcpy(buf, start, 4);
3112 memcpy(buf + len32 - 4, end, 4);
3114 memcpy(buf + align_start, data_buf, buf_size);
3117 if (bp->flash_info->buffered == 0) {
3118 flash_buffer = kmalloc(264, GFP_KERNEL);
3119 if (flash_buffer == NULL) {
3121 goto nvram_write_end;
3126 while ((written < len32) && (rc == 0)) {
3127 u32 page_start, page_end, data_start, data_end;
3128 u32 addr, cmd_flags;
3131 /* Find the page_start addr */
3132 page_start = offset32 + written;
3133 page_start -= (page_start % bp->flash_info->page_size);
3134 /* Find the page_end addr */
3135 page_end = page_start + bp->flash_info->page_size;
3136 /* Find the data_start addr */
3137 data_start = (written == 0) ? offset32 : page_start;
3138 /* Find the data_end addr */
3139 data_end = (page_end > offset32 + len32) ?
3140 (offset32 + len32) : page_end;
3142 /* Request access to the flash interface. */
3143 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3144 goto nvram_write_end;
3146 /* Enable access to flash interface */
3147 bnx2_enable_nvram_access(bp);
3149 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3150 if (bp->flash_info->buffered == 0) {
3153 /* Read the whole page into the buffer
3154 * (non-buffered flash only) */
3155 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3156 if (j == (bp->flash_info->page_size - 4)) {
3157 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3159 rc = bnx2_nvram_read_dword(bp,
3165 goto nvram_write_end;
3171 /* Enable writes to flash interface (unlock write-protect) */
3172 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3173 goto nvram_write_end;
3175 /* Erase the page */
3176 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3177 goto nvram_write_end;
3179 /* Re-enable write access for the actual programming. */
3180 bnx2_enable_nvram_write(bp);
3182 /* Loop to write back the buffer data from page_start to
3183 * data_start */
3184 i = 0;
3185 if (bp->flash_info->buffered == 0) {
3186 for (addr = page_start; addr < data_start;
3187 addr += 4, i += 4) {
3189 rc = bnx2_nvram_write_dword(bp, addr,
3190 &flash_buffer[i], cmd_flags);
3193 goto nvram_write_end;
3199 /* Loop to write the new data from data_start to data_end */
3200 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3201 if ((addr == page_end - 4) ||
3202 ((bp->flash_info->buffered) &&
3203 (addr == data_end - 4))) {
3205 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3207 rc = bnx2_nvram_write_dword(bp, addr, buf,
3211 goto nvram_write_end;
3217 /* Loop to write back the buffer data from data_end
3218 * to page_end */
3219 if (bp->flash_info->buffered == 0) {
3220 for (addr = data_end; addr < page_end;
3221 addr += 4, i += 4) {
3223 if (addr == page_end-4) {
3224 cmd_flags = BNX2_NVM_COMMAND_LAST;
3226 rc = bnx2_nvram_write_dword(bp, addr,
3227 &flash_buffer[i], cmd_flags);
3230 goto nvram_write_end;
3236 /* Disable writes to flash interface (lock write-protect) */
3237 bnx2_disable_nvram_write(bp);
3239 /* Disable access to flash interface */
3240 bnx2_disable_nvram_access(bp);
3241 bnx2_release_nvram_lock(bp);
3243 /* Increment written */
3244 written += data_end - data_start;
3248 if (bp->flash_info->buffered == 0)
3249 kfree(flash_buffer);
3251 if (align_start || align_end)
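/*
 * bnx2_reset_chip() - core reset sequence.  The DMA engines and host
 * coalescing block are quiesced, the firmware is given the WAIT0 handshake,
 * and a driver reset signature is written to shared memory so the bootcode
 * treats this as a soft reset.  The core reset is then triggered (through
 * BNX2_MISC_COMMAND on the 5709, through BNX2_PCICFG_MISC_CONFIG otherwise)
 * and polled for completion, byte swapping is verified via
 * BNX2_PCI_SWAP_DIAG0, and a final WAIT1 handshake lets the firmware finish
 * its own initialization.
 */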
3257 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3262 /* Wait for the current PCI transaction to complete before
3263 * issuing a reset. */
3264 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3265 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3266 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3267 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3268 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3269 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3272 /* Wait for the firmware to tell us it is ok to issue a reset. */
3273 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3275 /* Deposit a driver reset signature so the firmware knows that
3276 * this is a soft reset. */
3277 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3278 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3280 /* Do a dummy read to force the chip to complete all current transactions
3281 * before we issue the reset. */
3282 val = REG_RD(bp, BNX2_MISC_ID);
3284 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3285 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3286 REG_RD(bp, BNX2_MISC_COMMAND);
3289 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3290 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3292 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3295 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3296 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3297 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3300 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3302 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3303 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3304 current->state = TASK_UNINTERRUPTIBLE;
3305 schedule_timeout(HZ / 50);
3308 /* Reset takes approximately 30 usec */
3309 for (i = 0; i < 10; i++) {
3310 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3311 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3312 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3317 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3318 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3319 printk(KERN_ERR PFX "Chip reset did not complete\n");
3324 /* Make sure byte swapping is properly configured. */
3325 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3326 if (val != 0x01020304) {
3327 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3331 /* Wait for the firmware to finish its initialization. */
3332 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3336 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3337 /* Adjust the voltage regulator two steps lower. The default
3338 * of this register is 0x0000000e. */
3339 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3341 /* Remove bad rbuf memory from the free pool. */
3342 rc = bnx2_alloc_bad_rbuf(bp);
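/*
 * bnx2_init_chip() - bring the freshly reset chip to an operational state:
 * program DMA byte/word swapping and channel counts, enable the internal
 * blocks, load the on-chip CPU firmware, set up context memory, the MAC
 * address, backoff seed and MTU, point the host coalescing block at the
 * status and statistics blocks, program the coalescing parameters, and
 * finish with the RESET handshake to the firmware.
 */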
3349 bnx2_init_chip(struct bnx2 *bp)
3354 /* Make sure the interrupt is not active. */
3355 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3357 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3358 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3360 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3362 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3363 DMA_READ_CHANS << 12 |
3364 DMA_WRITE_CHANS << 16;
3366 val |= (0x2 << 20) | (1 << 11);
3368 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3371 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3372 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3373 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3375 REG_WR(bp, BNX2_DMA_CONFIG, val);
3377 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3378 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3379 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3380 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3383 if (bp->flags & PCIX_FLAG) {
3386 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3388 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3389 val16 & ~PCI_X_CMD_ERO);
3392 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3393 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3394 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3395 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3397 /* Initialize context mapping and zero out the quick contexts. The
3398 * context block must have already been enabled. */
3399 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3400 bnx2_init_5709_context(bp);
3402 bnx2_init_context(bp);
3404 if ((rc = bnx2_init_cpus(bp)) != 0)
3407 bnx2_init_nvram(bp);
3409 bnx2_set_mac_addr(bp);
3411 val = REG_RD(bp, BNX2_MQ_CONFIG);
3412 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3413 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3414 REG_WR(bp, BNX2_MQ_CONFIG, val);
3416 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3417 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3418 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3420 val = (BCM_PAGE_BITS - 8) << 24;
3421 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3423 /* Configure page size. */
3424 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3425 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3426 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3427 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3429 val = bp->mac_addr[0] +
3430 (bp->mac_addr[1] << 8) +
3431 (bp->mac_addr[2] << 16) +
3433 (bp->mac_addr[4] << 8) +
3434 (bp->mac_addr[5] << 16);
3435 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3437 /* Program the MTU. Also include 4 bytes for CRC32. */
3438 val = bp->dev->mtu + ETH_HLEN + 4;
3439 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3440 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3441 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3443 bp->last_status_idx = 0;
3444 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3446 /* Set up how to generate a link change interrupt. */
3447 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3449 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3450 (u64) bp->status_blk_mapping & 0xffffffff);
3451 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3453 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3454 (u64) bp->stats_blk_mapping & 0xffffffff);
3455 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3456 (u64) bp->stats_blk_mapping >> 32);
3458 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3459 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3461 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3462 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3464 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3465 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3467 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3469 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3471 REG_WR(bp, BNX2_HC_COM_TICKS,
3472 (bp->com_ticks_int << 16) | bp->com_ticks);
3474 REG_WR(bp, BNX2_HC_CMD_TICKS,
3475 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3477 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3478 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3480 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3481 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3482 else
3483 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3484 BNX2_HC_CONFIG_TX_TMR_MODE |
3485 BNX2_HC_CONFIG_COLLECT_STATS);
3488 /* Clear internal stats counters. */
3489 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3491 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3493 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3494 BNX2_PORT_FEATURE_ASF_ENABLED)
3495 bp->flags |= ASF_ENABLE_FLAG;
3497 /* Initialize the receive filter. */
3498 bnx2_set_rx_mode(bp->dev);
3500 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3503 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3504 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3508 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
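/*
 * bnx2_init_tx_context() - write the TX ring type and the buffer descriptor
 * chain base address into the chip's context memory for the given CID.  The
 * 5709 uses a different set of context offsets than the 5706/5708.
 */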
3514 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3516 u32 val, offset0, offset1, offset2, offset3;
3518 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3519 offset0 = BNX2_L2CTX_TYPE_XI;
3520 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3521 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3522 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3524 offset0 = BNX2_L2CTX_TYPE;
3525 offset1 = BNX2_L2CTX_CMD_TYPE;
3526 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3527 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3529 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3530 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3532 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3533 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3535 val = (u64) bp->tx_desc_mapping >> 32;
3536 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3538 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3539 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3543 bnx2_init_tx_ring(struct bnx2 *bp)
3548 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3550 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3552 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3553 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3558 bp->tx_prod_bseq = 0;
3561 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3562 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3564 bnx2_init_tx_context(bp, cid);
3568 bnx2_init_rx_ring(struct bnx2 *bp)
3572 u16 prod, ring_prod;
3575 /* 8 for CRC and VLAN */
3576 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3578 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3580 ring_prod = prod = bp->rx_prod = 0;
3583 bp->rx_prod_bseq = 0;
3585 for (i = 0; i < bp->rx_max_ring; i++) {
3588 rxbd = &bp->rx_desc_ring[i][0];
3589 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3590 rxbd->rx_bd_len = bp->rx_buf_use_size;
3591 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3593 if (i == (bp->rx_max_ring - 1))
3597 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3598 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3602 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3603 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3605 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3607 val = (u64) bp->rx_desc_mapping[0] >> 32;
3608 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3610 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3611 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3613 for (i = 0; i < bp->rx_ring_size; i++) {
3614 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3617 prod = NEXT_RX_BD(prod);
3618 ring_prod = RX_RING_IDX(prod);
3622 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3624 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
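/*
 * bnx2_set_rx_ring_size() - record the requested RX ring size and work out
 * how many descriptor pages are needed to hold it, rounded up to the next
 * power of two.  rx_max_ring_idx caches the highest valid buffer descriptor
 * index across those pages.
 */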
3628 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3632 bp->rx_ring_size = size;
3634 while (size > MAX_RX_DESC_CNT) {
3635 size -= MAX_RX_DESC_CNT;
3638 /* round to next power of 2 */
3640 while ((max & num_rings) == 0)
3643 if (num_rings != max)
3646 bp->rx_max_ring = max;
3647 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3651 bnx2_free_tx_skbs(struct bnx2 *bp)
3655 if (bp->tx_buf_ring == NULL)
3658 for (i = 0; i < TX_DESC_CNT; ) {
3659 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3660 struct sk_buff *skb = tx_buf->skb;
3668 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3669 skb_headlen(skb), PCI_DMA_TODEVICE);
3673 last = skb_shinfo(skb)->nr_frags;
3674 for (j = 0; j < last; j++) {
3675 tx_buf = &bp->tx_buf_ring[i + j + 1];
3676 pci_unmap_page(bp->pdev,
3677 pci_unmap_addr(tx_buf, mapping),
3678 skb_shinfo(skb)->frags[j].size,
3688 bnx2_free_rx_skbs(struct bnx2 *bp)
3692 if (bp->rx_buf_ring == NULL)
3695 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3696 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3697 struct sk_buff *skb = rx_buf->skb;
3702 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3703 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3712 bnx2_free_skbs(struct bnx2 *bp)
3714 bnx2_free_tx_skbs(bp);
3715 bnx2_free_rx_skbs(bp);
3719 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3723 rc = bnx2_reset_chip(bp, reset_code);
3728 if ((rc = bnx2_init_chip(bp)) != 0)
3731 bnx2_init_tx_ring(bp);
3732 bnx2_init_rx_ring(bp);
3737 bnx2_init_nic(struct bnx2 *bp)
3741 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3744 spin_lock_bh(&bp->phy_lock);
3746 spin_unlock_bh(&bp->phy_lock);
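/*
 * bnx2_test_registers() - ethtool register self-test.  Each reg_tbl[] entry
 * gives a register offset, a read/write mask (bits that must accept both 0
 * and 1) and a read-only mask (bits that must keep their original value).
 * The test writes 0 and then 0xffffffff to each register, checks both masks
 * after each write, and restores the saved value before moving on.
 */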
3752 bnx2_test_registers(struct bnx2 *bp)
3756 static const struct {
3762 { 0x006c, 0, 0x00000000, 0x0000003f },
3763 { 0x0090, 0, 0xffffffff, 0x00000000 },
3764 { 0x0094, 0, 0x00000000, 0x00000000 },
3766 { 0x0404, 0, 0x00003f00, 0x00000000 },
3767 { 0x0418, 0, 0x00000000, 0xffffffff },
3768 { 0x041c, 0, 0x00000000, 0xffffffff },
3769 { 0x0420, 0, 0x00000000, 0x80ffffff },
3770 { 0x0424, 0, 0x00000000, 0x00000000 },
3771 { 0x0428, 0, 0x00000000, 0x00000001 },
3772 { 0x0450, 0, 0x00000000, 0x0000ffff },
3773 { 0x0454, 0, 0x00000000, 0xffffffff },
3774 { 0x0458, 0, 0x00000000, 0xffffffff },
3776 { 0x0808, 0, 0x00000000, 0xffffffff },
3777 { 0x0854, 0, 0x00000000, 0xffffffff },
3778 { 0x0868, 0, 0x00000000, 0x77777777 },
3779 { 0x086c, 0, 0x00000000, 0x77777777 },
3780 { 0x0870, 0, 0x00000000, 0x77777777 },
3781 { 0x0874, 0, 0x00000000, 0x77777777 },
3783 { 0x0c00, 0, 0x00000000, 0x00000001 },
3784 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3785 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3787 { 0x1000, 0, 0x00000000, 0x00000001 },
3788 { 0x1004, 0, 0x00000000, 0x000f0001 },
3790 { 0x1408, 0, 0x01c00800, 0x00000000 },
3791 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3792 { 0x14a8, 0, 0x00000000, 0x000001ff },
3793 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3794 { 0x14b0, 0, 0x00000002, 0x00000001 },
3795 { 0x14b8, 0, 0x00000000, 0x00000000 },
3796 { 0x14c0, 0, 0x00000000, 0x00000009 },
3797 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3798 { 0x14cc, 0, 0x00000000, 0x00000001 },
3799 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3801 { 0x1800, 0, 0x00000000, 0x00000001 },
3802 { 0x1804, 0, 0x00000000, 0x00000003 },
3804 { 0x2800, 0, 0x00000000, 0x00000001 },
3805 { 0x2804, 0, 0x00000000, 0x00003f01 },
3806 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3807 { 0x2810, 0, 0xffff0000, 0x00000000 },
3808 { 0x2814, 0, 0xffff0000, 0x00000000 },
3809 { 0x2818, 0, 0xffff0000, 0x00000000 },
3810 { 0x281c, 0, 0xffff0000, 0x00000000 },
3811 { 0x2834, 0, 0xffffffff, 0x00000000 },
3812 { 0x2840, 0, 0x00000000, 0xffffffff },
3813 { 0x2844, 0, 0x00000000, 0xffffffff },
3814 { 0x2848, 0, 0xffffffff, 0x00000000 },
3815 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3817 { 0x2c00, 0, 0x00000000, 0x00000011 },
3818 { 0x2c04, 0, 0x00000000, 0x00030007 },
3820 { 0x3c00, 0, 0x00000000, 0x00000001 },
3821 { 0x3c04, 0, 0x00000000, 0x00070000 },
3822 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3823 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3824 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3825 { 0x3c14, 0, 0x00000000, 0xffffffff },
3826 { 0x3c18, 0, 0x00000000, 0xffffffff },
3827 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3828 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3830 { 0x5004, 0, 0x00000000, 0x0000007f },
3831 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3832 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3834 { 0x5c00, 0, 0x00000000, 0x00000001 },
3835 { 0x5c04, 0, 0x00000000, 0x0003000f },
3836 { 0x5c08, 0, 0x00000003, 0x00000000 },
3837 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3838 { 0x5c10, 0, 0x00000000, 0xffffffff },
3839 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3840 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3841 { 0x5c88, 0, 0x00000000, 0x00077373 },
3842 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3844 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3845 { 0x680c, 0, 0xffffffff, 0x00000000 },
3846 { 0x6810, 0, 0xffffffff, 0x00000000 },
3847 { 0x6814, 0, 0xffffffff, 0x00000000 },
3848 { 0x6818, 0, 0xffffffff, 0x00000000 },
3849 { 0x681c, 0, 0xffffffff, 0x00000000 },
3850 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3851 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3852 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3853 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3854 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3855 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3856 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3857 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3858 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3859 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3860 { 0x684c, 0, 0xffffffff, 0x00000000 },
3861 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3862 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3863 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3864 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3865 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3866 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3868 { 0xffff, 0, 0x00000000, 0x00000000 },
3872 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3873 u32 offset, rw_mask, ro_mask, save_val, val;
3875 offset = (u32) reg_tbl[i].offset;
3876 rw_mask = reg_tbl[i].rw_mask;
3877 ro_mask = reg_tbl[i].ro_mask;
3879 save_val = readl(bp->regview + offset);
3881 writel(0, bp->regview + offset);
3883 val = readl(bp->regview + offset);
3884 if ((val & rw_mask) != 0) {
3888 if ((val & ro_mask) != (save_val & ro_mask)) {
3892 writel(0xffffffff, bp->regview + offset);
3894 val = readl(bp->regview + offset);
3895 if ((val & rw_mask) != rw_mask) {
3899 if ((val & ro_mask) != (save_val & ro_mask)) {
3903 writel(save_val, bp->regview + offset);
3907 writel(save_val, bp->regview + offset);
3915 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3917 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3918 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
3921 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3924 for (offset = 0; offset < size; offset += 4) {
3926 REG_WR_IND(bp, start + offset, test_pattern[i]);
3928 if (REG_RD_IND(bp, start + offset) !=
3938 bnx2_test_memory(struct bnx2 *bp)
3942 static const struct {
3946 { 0x60000, 0x4000 },
3947 { 0xa0000, 0x3000 },
3948 { 0xe0000, 0x4000 },
3949 { 0x120000, 0x4000 },
3950 { 0x1a0000, 0x4000 },
3951 { 0x160000, 0x4000 },
3955 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3956 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3957 mem_tbl[i].len)) != 0) {
3965 #define BNX2_MAC_LOOPBACK 0
3966 #define BNX2_PHY_LOOPBACK 1
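/*
 * bnx2_run_loopback() - send one self-addressed test packet through either
 * MAC or PHY loopback.  The packet is placed directly on the TX ring, a
 * coalesce-now command forces a status block update, and the test passes
 * only if the TX and RX consumer indices advance as expected and the
 * received frame comes back with no l2_fhdr errors, the expected length,
 * and an intact payload.
 */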
3969 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3971 unsigned int pkt_size, num_pkts, i;
3972 struct sk_buff *skb, *rx_skb;
3973 unsigned char *packet;
3974 u16 rx_start_idx, rx_idx;
3977 struct sw_bd *rx_buf;
3978 struct l2_fhdr *rx_hdr;
3981 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3982 bp->loopback = MAC_LOOPBACK;
3983 bnx2_set_mac_loopback(bp);
3985 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3986 bp->loopback = PHY_LOOPBACK;
3987 bnx2_set_phy_loopback(bp);
3993 skb = netdev_alloc_skb(bp->dev, pkt_size);
3996 packet = skb_put(skb, pkt_size);
3997 memcpy(packet, bp->mac_addr, 6);
3998 memset(packet + 6, 0x0, 8);
3999 for (i = 14; i < pkt_size; i++)
4000 packet[i] = (unsigned char) (i & 0xff);
4002 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4005 REG_WR(bp, BNX2_HC_COMMAND,
4006 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4008 REG_RD(bp, BNX2_HC_COMMAND);
4011 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4015 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4017 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4018 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4019 txbd->tx_bd_mss_nbytes = pkt_size;
4020 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4023 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4024 bp->tx_prod_bseq += pkt_size;
4026 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4027 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4031 REG_WR(bp, BNX2_HC_COMMAND,
4032 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4034 REG_RD(bp, BNX2_HC_COMMAND);
4038 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4041 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4042 goto loopback_test_done;
4045 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4046 if (rx_idx != rx_start_idx + num_pkts) {
4047 goto loopback_test_done;
4050 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4051 rx_skb = rx_buf->skb;
4053 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4054 skb_reserve(rx_skb, bp->rx_offset);
4056 pci_dma_sync_single_for_cpu(bp->pdev,
4057 pci_unmap_addr(rx_buf, mapping),
4058 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4060 if (rx_hdr->l2_fhdr_status &
4061 (L2_FHDR_ERRORS_BAD_CRC |
4062 L2_FHDR_ERRORS_PHY_DECODE |
4063 L2_FHDR_ERRORS_ALIGNMENT |
4064 L2_FHDR_ERRORS_TOO_SHORT |
4065 L2_FHDR_ERRORS_GIANT_FRAME)) {
4067 goto loopback_test_done;
4070 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4071 goto loopback_test_done;
4074 for (i = 14; i < pkt_size; i++) {
4075 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4076 goto loopback_test_done;
4087 #define BNX2_MAC_LOOPBACK_FAILED 1
4088 #define BNX2_PHY_LOOPBACK_FAILED 2
4089 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4090 BNX2_PHY_LOOPBACK_FAILED)
4093 bnx2_test_loopback(struct bnx2 *bp)
4097 if (!netif_running(bp->dev))
4098 return BNX2_LOOPBACK_FAILED;
4100 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4101 spin_lock_bh(&bp->phy_lock);
4103 spin_unlock_bh(&bp->phy_lock);
4104 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4105 rc |= BNX2_MAC_LOOPBACK_FAILED;
4106 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4107 rc |= BNX2_PHY_LOOPBACK_FAILED;
4111 #define NVRAM_SIZE 0x200
4112 #define CRC32_RESIDUAL 0xdebb20e3
4115 bnx2_test_nvram(struct bnx2 *bp)
4117 u32 buf[NVRAM_SIZE / 4];
4118 u8 *data = (u8 *) buf;
4122 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4123 goto test_nvram_done;
4125 magic = be32_to_cpu(buf[0]);
4126 if (magic != 0x669955aa) {
4128 goto test_nvram_done;
4131 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4132 goto test_nvram_done;
4134 csum = ether_crc_le(0x100, data);
4135 if (csum != CRC32_RESIDUAL) {
4137 goto test_nvram_done;
4140 csum = ether_crc_le(0x100, data + 0x100);
4141 if (csum != CRC32_RESIDUAL) {
4150 bnx2_test_link(struct bnx2 *bp)
4154 spin_lock_bh(&bp->phy_lock);
4155 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4156 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4157 spin_unlock_bh(&bp->phy_lock);
4159 if (bmsr & BMSR_LSTATUS) {
4166 bnx2_test_intr(struct bnx2 *bp)
4171 if (!netif_running(bp->dev))
4174 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4176 /* This register is not touched during run-time. */
4177 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4178 REG_RD(bp, BNX2_HC_COMMAND);
4180 for (i = 0; i < 10; i++) {
4181 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4187 msleep_interruptible(10);
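/*
 * bnx2_5706_serdes_timer() - periodic parallel-detect workaround for the
 * 5706S SerDes.  If autoneg has not produced a link and the PHY reports
 * signal detect without having received config words, the link is forced to
 * 1000 Mbps full duplex and PHY_PARALLEL_DETECT_FLAG is set; once the link
 * partner starts sending config words again, autoneg is re-enabled and the
 * flag is cleared.
 */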
4196 bnx2_5706_serdes_timer(struct bnx2 *bp)
4198 spin_lock(&bp->phy_lock);
4199 if (bp->serdes_an_pending)
4200 bp->serdes_an_pending--;
4201 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4204 bp->current_interval = bp->timer_interval;
4206 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4208 if (bmcr & BMCR_ANENABLE) {
4211 bnx2_write_phy(bp, 0x1c, 0x7c00);
4212 bnx2_read_phy(bp, 0x1c, &phy1);
4214 bnx2_write_phy(bp, 0x17, 0x0f01);
4215 bnx2_read_phy(bp, 0x15, &phy2);
4216 bnx2_write_phy(bp, 0x17, 0x0f01);
4217 bnx2_read_phy(bp, 0x15, &phy2);
4219 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4220 !(phy2 & 0x20)) { /* no CONFIG */
4222 bmcr &= ~BMCR_ANENABLE;
4223 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4224 bnx2_write_phy(bp, MII_BMCR, bmcr);
4225 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4229 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4230 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4233 bnx2_write_phy(bp, 0x17, 0x0f01);
4234 bnx2_read_phy(bp, 0x15, &phy2);
4238 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4239 bmcr |= BMCR_ANENABLE;
4240 bnx2_write_phy(bp, MII_BMCR, bmcr);
4242 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4245 bp->current_interval = bp->timer_interval;
4247 spin_unlock(&bp->phy_lock);
4251 bnx2_5708_serdes_timer(struct bnx2 *bp)
4253 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4254 bp->serdes_an_pending = 0;
4258 spin_lock(&bp->phy_lock);
4259 if (bp->serdes_an_pending)
4260 bp->serdes_an_pending--;
4261 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4264 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4266 if (bmcr & BMCR_ANENABLE) {
4267 bmcr &= ~BMCR_ANENABLE;
4268 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4269 bnx2_write_phy(bp, MII_BMCR, bmcr);
4270 bp->current_interval = SERDES_FORCED_TIMEOUT;
4272 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4273 bmcr |= BMCR_ANENABLE;
4274 bnx2_write_phy(bp, MII_BMCR, bmcr);
4275 bp->serdes_an_pending = 2;
4276 bp->current_interval = bp->timer_interval;
4280 bp->current_interval = bp->timer_interval;
4282 spin_unlock(&bp->phy_lock);
4286 bnx2_timer(unsigned long data)
4288 struct bnx2 *bp = (struct bnx2 *) data;
4291 if (!netif_running(bp->dev))
4294 if (atomic_read(&bp->intr_sem) != 0)
4295 goto bnx2_restart_timer;
4297 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4298 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4300 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4302 if (bp->phy_flags & PHY_SERDES_FLAG) {
4303 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4304 bnx2_5706_serdes_timer(bp);
4305 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4306 bnx2_5708_serdes_timer(bp);
4310 mod_timer(&bp->timer, jiffies + bp->current_interval);
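/*
 * bnx2_open() - bring the interface up: power the chip to D0, allocate the
 * rings and status/statistics blocks, attach the interrupt handler (MSI
 * where the chip supports it, falling back to INTx automatically if the MSI
 * self-test below never sees an interrupt), initialize the NIC, and start
 * the watchdog timer before enabling interrupts.
 */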
4313 /* Called with rtnl_lock */
4315 bnx2_open(struct net_device *dev)
4317 struct bnx2 *bp = netdev_priv(dev);
4320 bnx2_set_power_state(bp, PCI_D0);
4321 bnx2_disable_int(bp);
4323 rc = bnx2_alloc_mem(bp);
4327 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4328 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4331 if (pci_enable_msi(bp->pdev) == 0) {
4332 bp->flags |= USING_MSI_FLAG;
4333 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4337 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4338 IRQF_SHARED, dev->name, dev);
4342 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4350 rc = bnx2_init_nic(bp);
4353 free_irq(bp->pdev->irq, dev);
4354 if (bp->flags & USING_MSI_FLAG) {
4355 pci_disable_msi(bp->pdev);
4356 bp->flags &= ~USING_MSI_FLAG;
4363 mod_timer(&bp->timer, jiffies + bp->current_interval);
4365 atomic_set(&bp->intr_sem, 0);
4367 bnx2_enable_int(bp);
4369 if (bp->flags & USING_MSI_FLAG) {
4370 /* Test MSI to make sure it is working.
4371 * If the MSI test fails, go back to INTx mode.
4372 */
4373 if (bnx2_test_intr(bp) != 0) {
4374 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4375 " using MSI, switching to INTx mode. Please"
4376 " report this failure to the PCI maintainer"
4377 " and include system chipset information.\n",
4380 bnx2_disable_int(bp);
4381 free_irq(bp->pdev->irq, dev);
4382 pci_disable_msi(bp->pdev);
4383 bp->flags &= ~USING_MSI_FLAG;
4385 rc = bnx2_init_nic(bp);
4388 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4389 IRQF_SHARED, dev->name, dev);
4394 del_timer_sync(&bp->timer);
4397 bnx2_enable_int(bp);
4400 if (bp->flags & USING_MSI_FLAG) {
4401 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4404 netif_start_queue(dev);
4410 bnx2_reset_task(void *data)
4412 struct bnx2 *bp = data;
4414 if (!netif_running(bp->dev))
4417 bp->in_reset_task = 1;
4418 bnx2_netif_stop(bp);
4422 atomic_set(&bp->intr_sem, 1);
4423 bnx2_netif_start(bp);
4424 bp->in_reset_task = 0;
4428 bnx2_tx_timeout(struct net_device *dev)
4430 struct bnx2 *bp = netdev_priv(dev);
4432 /* This allows the netif to be shut down gracefully before resetting */
4433 schedule_work(&bp->reset_task);
4437 /* Called with rtnl_lock */
4439 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4441 struct bnx2 *bp = netdev_priv(dev);
4443 bnx2_netif_stop(bp);
4446 bnx2_set_rx_mode(dev);
4448 bnx2_netif_start(bp);
4451 /* Called with rtnl_lock */
4453 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4455 struct bnx2 *bp = netdev_priv(dev);
4457 bnx2_netif_stop(bp);
4460 bp->vlgrp->vlan_devices[vid] = NULL;
4461 bnx2_set_rx_mode(dev);
4463 bnx2_netif_start(bp);
4467 /* Called with netif_tx_lock.
4468 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4469 * netif_wake_queue().
4470 */
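/*
 * The transmit path maps the linear part of the skb and each page fragment
 * onto a chain of buffer descriptors, encoding VLAN tag, checksum offload
 * and LSO (MSS plus IP/TCP header length) information in the BD flags.  The
 * producer index and byte-sequence mailboxes are then written to kick the
 * hardware, and the queue is stopped once fewer than MAX_SKB_FRAGS + 1
 * descriptors remain free.
 */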
4472 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4474 struct bnx2 *bp = netdev_priv(dev);
4477 struct sw_bd *tx_buf;
4478 u32 len, vlan_tag_flags, last_frag, mss;
4479 u16 prod, ring_prod;
4482 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4483 netif_stop_queue(dev);
4484 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4487 return NETDEV_TX_BUSY;
4489 len = skb_headlen(skb);
4491 ring_prod = TX_RING_IDX(prod);
4494 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4495 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4498 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4500 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4503 if ((mss = skb_shinfo(skb)->gso_size) &&
4504 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4505 u32 tcp_opt_len, ip_tcp_len;
4507 if (skb_header_cloned(skb) &&
4508 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4510 return NETDEV_TX_OK;
4513 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4514 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4517 if (skb->h.th->doff > 5) {
4518 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4520 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4522 skb->nh.iph->check = 0;
4523 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4525 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4529 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4530 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4531 (tcp_opt_len >> 2)) << 8;
4540 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4542 tx_buf = &bp->tx_buf_ring[ring_prod];
4544 pci_unmap_addr_set(tx_buf, mapping, mapping);
4546 txbd = &bp->tx_desc_ring[ring_prod];
4548 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4549 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4550 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4551 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4553 last_frag = skb_shinfo(skb)->nr_frags;
4555 for (i = 0; i < last_frag; i++) {
4556 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4558 prod = NEXT_TX_BD(prod);
4559 ring_prod = TX_RING_IDX(prod);
4560 txbd = &bp->tx_desc_ring[ring_prod];
4563 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4564 len, PCI_DMA_TODEVICE);
4565 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4568 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4569 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4570 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4571 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4574 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4576 prod = NEXT_TX_BD(prod);
4577 bp->tx_prod_bseq += skb->len;
4579 REG_WR16(bp, bp->tx_bidx_addr, prod);
4580 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4585 dev->trans_start = jiffies;
4587 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4588 netif_stop_queue(dev);
4589 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4590 netif_wake_queue(dev);
4593 return NETDEV_TX_OK;
4596 /* Called with rtnl_lock */
4598 bnx2_close(struct net_device *dev)
4600 struct bnx2 *bp = netdev_priv(dev);
4603 /* Calling flush_scheduled_work() may deadlock because
4604 * linkwatch_event() may be on the workqueue and it will try to get
4605 * the rtnl_lock which we are holding.
4607 while (bp->in_reset_task)
4610 bnx2_netif_stop(bp);
4611 del_timer_sync(&bp->timer);
4612 if (bp->flags & NO_WOL_FLAG)
4613 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4614 else if (bp->wol)
4615 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4616 else
4617 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4618 bnx2_reset_chip(bp, reset_code);
4619 free_irq(bp->pdev->irq, dev);
4620 if (bp->flags & USING_MSI_FLAG) {
4621 pci_disable_msi(bp->pdev);
4622 bp->flags &= ~USING_MSI_FLAG;
4627 netif_carrier_off(bp->dev);
4628 bnx2_set_power_state(bp, PCI_D3hot);
4632 #define GET_NET_STATS64(ctr) \
4633 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4634 (unsigned long) (ctr##_lo)
4636 #define GET_NET_STATS32(ctr) \
4637 (ctr##_lo)
4639 #if (BITS_PER_LONG == 64)
4640 #define GET_NET_STATS GET_NET_STATS64
4642 #define GET_NET_STATS GET_NET_STATS32
4645 static struct net_device_stats *
4646 bnx2_get_stats(struct net_device *dev)
4648 struct bnx2 *bp = netdev_priv(dev);
4649 struct statistics_block *stats_blk = bp->stats_blk;
4650 struct net_device_stats *net_stats = &bp->net_stats;
4652 if (bp->stats_blk == NULL) {
4655 net_stats->rx_packets =
4656 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4657 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4658 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4660 net_stats->tx_packets =
4661 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4662 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4663 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4665 net_stats->rx_bytes =
4666 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4668 net_stats->tx_bytes =
4669 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4671 net_stats->multicast =
4672 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4674 net_stats->collisions =
4675 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4677 net_stats->rx_length_errors =
4678 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4679 stats_blk->stat_EtherStatsOverrsizePkts);
4681 net_stats->rx_over_errors =
4682 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4684 net_stats->rx_frame_errors =
4685 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4687 net_stats->rx_crc_errors =
4688 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4690 net_stats->rx_errors = net_stats->rx_length_errors +
4691 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4692 net_stats->rx_crc_errors;
4694 net_stats->tx_aborted_errors =
4695 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4696 stats_blk->stat_Dot3StatsLateCollisions);
4698 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4699 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4700 net_stats->tx_carrier_errors = 0;
4702 net_stats->tx_carrier_errors =
4704 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4707 net_stats->tx_errors =
4709 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4711 net_stats->tx_aborted_errors +
4712 net_stats->tx_carrier_errors;
4714 net_stats->rx_missed_errors =
4715 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4716 stats_blk->stat_FwRxDrop);
4721 /* All ethtool functions called with rtnl_lock */
4724 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4726 struct bnx2 *bp = netdev_priv(dev);
4728 cmd->supported = SUPPORTED_Autoneg;
4729 if (bp->phy_flags & PHY_SERDES_FLAG) {
4730 cmd->supported |= SUPPORTED_1000baseT_Full |
4733 cmd->port = PORT_FIBRE;
4736 cmd->supported |= SUPPORTED_10baseT_Half |
4737 SUPPORTED_10baseT_Full |
4738 SUPPORTED_100baseT_Half |
4739 SUPPORTED_100baseT_Full |
4740 SUPPORTED_1000baseT_Full |
4743 cmd->port = PORT_TP;
4746 cmd->advertising = bp->advertising;
4748 if (bp->autoneg & AUTONEG_SPEED) {
4749 cmd->autoneg = AUTONEG_ENABLE;
4752 cmd->autoneg = AUTONEG_DISABLE;
4755 if (netif_carrier_ok(dev)) {
4756 cmd->speed = bp->line_speed;
4757 cmd->duplex = bp->duplex;
4764 cmd->transceiver = XCVR_INTERNAL;
4765 cmd->phy_address = bp->phy_addr;
4771 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4773 struct bnx2 *bp = netdev_priv(dev);
4774 u8 autoneg = bp->autoneg;
4775 u8 req_duplex = bp->req_duplex;
4776 u16 req_line_speed = bp->req_line_speed;
4777 u32 advertising = bp->advertising;
4779 if (cmd->autoneg == AUTONEG_ENABLE) {
4780 autoneg |= AUTONEG_SPEED;
4782 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4784 /* allow advertising a single speed */
4785 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4786 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4787 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4788 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4790 if (bp->phy_flags & PHY_SERDES_FLAG)
4793 advertising = cmd->advertising;
4796 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4797 advertising = cmd->advertising;
4799 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4803 if (bp->phy_flags & PHY_SERDES_FLAG) {
4804 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4807 advertising = ETHTOOL_ALL_COPPER_SPEED;
4810 advertising |= ADVERTISED_Autoneg;
4813 if (bp->phy_flags & PHY_SERDES_FLAG) {
4814 if ((cmd->speed != SPEED_1000 &&
4815 cmd->speed != SPEED_2500) ||
4816 (cmd->duplex != DUPLEX_FULL))
4819 if (cmd->speed == SPEED_2500 &&
4820 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4823 else if (cmd->speed == SPEED_1000) {
4826 autoneg &= ~AUTONEG_SPEED;
4827 req_line_speed = cmd->speed;
4828 req_duplex = cmd->duplex;
4832 bp->autoneg = autoneg;
4833 bp->advertising = advertising;
4834 bp->req_line_speed = req_line_speed;
4835 bp->req_duplex = req_duplex;
4837 spin_lock_bh(&bp->phy_lock);
4841 spin_unlock_bh(&bp->phy_lock);
4847 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4849 struct bnx2 *bp = netdev_priv(dev);
4851 strcpy(info->driver, DRV_MODULE_NAME);
4852 strcpy(info->version, DRV_MODULE_VERSION);
4853 strcpy(info->bus_info, pci_name(bp->pdev));
4854 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4855 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4856 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4857 info->fw_version[1] = info->fw_version[3] = '.';
4858 info->fw_version[5] = 0;
4861 #define BNX2_REGDUMP_LEN (32 * 1024)
4864 bnx2_get_regs_len(struct net_device *dev)
4866 return BNX2_REGDUMP_LEN;
4870 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4872 u32 *p = _p, i, offset;
4874 struct bnx2 *bp = netdev_priv(dev);
4875 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4876 0x0800, 0x0880, 0x0c00, 0x0c10,
4877 0x0c30, 0x0d08, 0x1000, 0x101c,
4878 0x1040, 0x1048, 0x1080, 0x10a4,
4879 0x1400, 0x1490, 0x1498, 0x14f0,
4880 0x1500, 0x155c, 0x1580, 0x15dc,
4881 0x1600, 0x1658, 0x1680, 0x16d8,
4882 0x1800, 0x1820, 0x1840, 0x1854,
4883 0x1880, 0x1894, 0x1900, 0x1984,
4884 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4885 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4886 0x2000, 0x2030, 0x23c0, 0x2400,
4887 0x2800, 0x2820, 0x2830, 0x2850,
4888 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4889 0x3c00, 0x3c94, 0x4000, 0x4010,
4890 0x4080, 0x4090, 0x43c0, 0x4458,
4891 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4892 0x4fc0, 0x5010, 0x53c0, 0x5444,
4893 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4894 0x5fc0, 0x6000, 0x6400, 0x6428,
4895 0x6800, 0x6848, 0x684c, 0x6860,
4896 0x6888, 0x6910, 0x8000 };
4900 memset(p, 0, BNX2_REGDUMP_LEN);
4902 if (!netif_running(bp->dev))
4906 offset = reg_boundaries[0];
4908 while (offset < BNX2_REGDUMP_LEN) {
4909 *p++ = REG_RD(bp, offset);
4911 if (offset == reg_boundaries[i + 1]) {
4912 offset = reg_boundaries[i + 2];
4913 p = (u32 *) (orig_p + offset);
4920 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4922 struct bnx2 *bp = netdev_priv(dev);
4924 if (bp->flags & NO_WOL_FLAG) {
4929 wol->supported = WAKE_MAGIC;
4931 wol->wolopts = WAKE_MAGIC;
4935 memset(&wol->sopass, 0, sizeof(wol->sopass));
4939 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4941 struct bnx2 *bp = netdev_priv(dev);
4943 if (wol->wolopts & ~WAKE_MAGIC)
4946 if (wol->wolopts & WAKE_MAGIC) {
4947 if (bp->flags & NO_WOL_FLAG)
4959 bnx2_nway_reset(struct net_device *dev)
4961 struct bnx2 *bp = netdev_priv(dev);
4964 if (!(bp->autoneg & AUTONEG_SPEED)) {
4968 spin_lock_bh(&bp->phy_lock);
4970 /* Force a link down that is visible to the link partner */
4971 if (bp->phy_flags & PHY_SERDES_FLAG) {
4972 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4973 spin_unlock_bh(&bp->phy_lock);
4977 spin_lock_bh(&bp->phy_lock);
4979 bp->current_interval = SERDES_AN_TIMEOUT;
4980 bp->serdes_an_pending = 1;
4981 mod_timer(&bp->timer, jiffies + bp->current_interval);
4984 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4985 bmcr &= ~BMCR_LOOPBACK;
4986 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4988 spin_unlock_bh(&bp->phy_lock);
4994 bnx2_get_eeprom_len(struct net_device *dev)
4996 struct bnx2 *bp = netdev_priv(dev);
4998 if (bp->flash_info == NULL)
5001 return (int) bp->flash_size;
5005 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5008 struct bnx2 *bp = netdev_priv(dev);
5011 /* parameters already validated in ethtool_get_eeprom */
5013 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5019 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5022 struct bnx2 *bp = netdev_priv(dev);
5025 /* parameters already validated in ethtool_set_eeprom */
5027 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5033 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5035 struct bnx2 *bp = netdev_priv(dev);
5037 memset(coal, 0, sizeof(struct ethtool_coalesce));
5039 coal->rx_coalesce_usecs = bp->rx_ticks;
5040 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5041 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5042 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5044 coal->tx_coalesce_usecs = bp->tx_ticks;
5045 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5046 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5047 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5049 coal->stats_block_coalesce_usecs = bp->stats_ticks;
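/*
 * bnx2_set_coalesce() - apply new interrupt coalescing parameters.  Tick
 * values are clamped to 0x3ff, frame-count trip points to 0xff, and the
 * statistics interval to 0xffff00 (then truncated to a multiple of 0x100);
 * if the interface is running it is restarted so the new values take effect.
 */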
5055 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5057 struct bnx2 *bp = netdev_priv(dev);
5059 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5060 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5062 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5063 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5065 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5066 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5068 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5069 if (bp->rx_quick_cons_trip_int > 0xff)
5070 bp->rx_quick_cons_trip_int = 0xff;
5072 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5073 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5075 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5076 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5078 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5079 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5081 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5082 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5085 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5086 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5087 bp->stats_ticks &= 0xffff00;
5089 if (netif_running(bp->dev)) {
5090 bnx2_netif_stop(bp);
5092 bnx2_netif_start(bp);
5099 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5101 struct bnx2 *bp = netdev_priv(dev);
5103 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5104 ering->rx_mini_max_pending = 0;
5105 ering->rx_jumbo_max_pending = 0;
5107 ering->rx_pending = bp->rx_ring_size;
5108 ering->rx_mini_pending = 0;
5109 ering->rx_jumbo_pending = 0;
5111 ering->tx_max_pending = MAX_TX_DESC_CNT;
5112 ering->tx_pending = bp->tx_ring_size;
5116 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5118 struct bnx2 *bp = netdev_priv(dev);
5120 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5121 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5122 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5126 if (netif_running(bp->dev)) {
5127 bnx2_netif_stop(bp);
5128 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5133 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5134 bp->tx_ring_size = ering->tx_pending;
5136 if (netif_running(bp->dev)) {
5139 rc = bnx2_alloc_mem(bp);
5143 bnx2_netif_start(bp);
5150 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5152 struct bnx2 *bp = netdev_priv(dev);
5154 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5155 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5156 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;

	spin_lock_bh(&bp->phy_lock);
	bnx2_setup_phy(bp);
	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
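/* Counter names reported by "ethtool -S"; the order must match
 * bnx2_stats_offset_arr below.
 */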
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
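/* Self-test names reported by "ethtool -t"; the first three tests require
 * the interface to be taken offline.
 */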
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
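/* ethtool -t: run the offline tests (registers, memory, loopback) when
 * requested, then the online NVRAM, interrupt and link tests, setting
 * ETH_TEST_FL_FAILED on any failure.
 */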
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr, sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr, sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
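/* ethtool -S: copy the hardware statistics block into the caller's buffer,
 * widening 4-byte counters and merging the hi/lo words of 8-byte ones.
 */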
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
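/* ethtool -p: blink the port LED for the requested number of seconds so
 * the adapter can be located physically.
 */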
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
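/* One-time board setup at probe time: map the register window, set the DMA
 * mask, read the chip ID, bus type/speed and permanent MAC address from
 * shared memory, and fill in default coalescing and link parameters.
 */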
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word. This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						PCI_DEVICE_ID_AMD_8132_BRIDGE,
						amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
err_out_release:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return rc;
}
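/* PCI probe entry point: allocate the net_device, initialize the board,
 * hook up the net_device methods and feature flags, and register it.
 */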
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
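/* PCI remove: unregister the net_device and release all board resources. */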
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
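/* Power management: quiesce the device and pick a reset code that preserves
 * the Wake-on-LAN configuration before entering the requested sleep state.
 */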
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
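/* Glue the driver into the PCI core; probe, remove and power-management
 * callbacks are dispatched through this table.
 */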
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);