1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.43"
60 #define DRV_MODULE_RELDATE "June 28, 2006"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
121 static struct flash_spec flash_table[] =
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 /* Atmel Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205 "Buffered flash (256kB)"},
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
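/* Annotation (not in the original source): tx_prod and tx_cons are
 * free-running u16 indices.  When the masked subtraction above wraps
 * (diff > MAX_TX_DESC_CNT), re-masking and subtracting 1 compensates
 * for the chain BD slot reserved at the end of each ring page.  As an
 * illustrative example with TX_DESC_CNT == 256: TX_RING_IDX(tx_prod)
 * == 5 and TX_RING_IDX(tx_cons) == 250 give diff == (u16)(5 - 250)
 * == 65291, then (65291 & 255) - 1 == 10, so bp->tx_ring_size - 10
 * descriptors are reported free.
 */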
222 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
229 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
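/* Annotation (not in the original source): these helpers implement
 * indirect register access -- the target offset is latched in
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and the data then moves through
 * BNX2_PCICFG_REG_WINDOW.  A sketch of a typical read-modify-write
 * using them (the bit name is hypothetical):
 *
 *	u32 val = bnx2_reg_rd_ind(bp, offset);
 *	bnx2_reg_wr_ind(bp, offset, val | SOME_BIT);
 */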
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
239 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240 REG_WR(bp, BNX2_CTX_DATA, val);
244 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
249 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
253 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
259 val1 = (bp->phy_addr << 21) | (reg << 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261 BNX2_EMAC_MDIO_COMM_START_BUSY;
262 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
264 for (i = 0; i < 50; i++) {
267 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
271 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
278 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
287 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
291 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
306 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
310 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
316 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
321 for (i = 0; i < 50; i++) {
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
331 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
336 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
350 bnx2_disable_int(struct bnx2 *bp)
352 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
358 bnx2_enable_int(struct bnx2 *bp)
360 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
367 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
371 bnx2_disable_int_sync(struct bnx2 *bp)
373 atomic_inc(&bp->intr_sem);
374 bnx2_disable_int(bp);
375 synchronize_irq(bp->pdev->irq);
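/* Annotation (not in the original source): intr_sem acts as a nesting
 * gate for the interrupt path.  bnx2_disable_int_sync() increments it,
 * masks the interrupt, and waits for any handler already running; the
 * ISRs below return early while it is non-zero.  bnx2_netif_start()
 * only re-enables the data path when atomic_dec_and_test() brings the
 * count back to zero, so stop/start calls must be strictly paired.
 */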
379 bnx2_netif_stop(struct bnx2 *bp)
381 bnx2_disable_int_sync(bp);
382 if (netif_running(bp->dev)) {
383 netif_poll_disable(bp->dev);
384 netif_tx_disable(bp->dev);
385 bp->dev->trans_start = jiffies; /* prevent tx timeout */
390 bnx2_netif_start(struct bnx2 *bp)
392 if (atomic_dec_and_test(&bp->intr_sem)) {
393 if (netif_running(bp->dev)) {
394 netif_wake_queue(bp->dev);
395 netif_poll_enable(bp->dev);
402 bnx2_free_mem(struct bnx2 *bp)
406 if (bp->status_blk) {
407 pci_free_consistent(bp->pdev, bp->status_stats_size,
408 bp->status_blk, bp->status_blk_mapping);
409 bp->status_blk = NULL;
410 bp->stats_blk = NULL;
412 if (bp->tx_desc_ring) {
413 pci_free_consistent(bp->pdev,
414 sizeof(struct tx_bd) * TX_DESC_CNT,
415 bp->tx_desc_ring, bp->tx_desc_mapping);
416 bp->tx_desc_ring = NULL;
418 kfree(bp->tx_buf_ring);
419 bp->tx_buf_ring = NULL;
420 for (i = 0; i < bp->rx_max_ring; i++) {
421 if (bp->rx_desc_ring[i])
422 pci_free_consistent(bp->pdev,
423 sizeof(struct rx_bd) * RX_DESC_CNT,
425 bp->rx_desc_mapping[i]);
426 bp->rx_desc_ring[i] = NULL;
428 vfree(bp->rx_buf_ring);
429 bp->rx_buf_ring = NULL;
433 bnx2_alloc_mem(struct bnx2 *bp)
435 int i, status_blk_size;
437 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
439 if (bp->tx_buf_ring == NULL)
442 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443 sizeof(struct tx_bd) *
445 &bp->tx_desc_mapping);
446 if (bp->tx_desc_ring == NULL)
449 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
451 if (bp->rx_buf_ring == NULL)
454 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
457 for (i = 0; i < bp->rx_max_ring; i++) {
458 bp->rx_desc_ring[i] =
459 pci_alloc_consistent(bp->pdev,
460 sizeof(struct rx_bd) * RX_DESC_CNT,
461 &bp->rx_desc_mapping[i]);
462 if (bp->rx_desc_ring[i] == NULL)
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469 bp->status_stats_size = status_blk_size +
470 sizeof(struct statistics_block);
472 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
473 &bp->status_blk_mapping);
474 if (bp->status_blk == NULL)
477 memset(bp->status_blk, 0, bp->status_stats_size);
479 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
482 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
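/* Annotation (not in the original source): the combined allocation
 * above lays the two blocks back to back in one coherent buffer:
 *
 *	status_blk / status_blk_mapping -> struct status_block,
 *	                                   padded to an L1 cache line
 *	stats_blk / stats_blk_mapping   -> struct statistics_block,
 *	                                   at status_blk + status_blk_size
 */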
492 bnx2_report_fw_link(struct bnx2 *bp)
494 u32 fw_link_status = 0;
499 switch (bp->line_speed) {
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
548 bnx2_report_link(struct bnx2 *bp)
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
554 printk("%d Mbps ", bp->line_speed);
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
559 printk("half duplex");
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
568 printk(", transmit ");
570 printk("flow control ON");
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
579 bnx2_report_fw_link(bp);
583 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
585 u32 local_adv, remote_adv;
588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
597 if (bp->duplex != DUPLEX_FULL) {
601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
653 bp->flow_ctrl = FLOW_CTRL_TX;
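/* Annotation (not in the original source): the resolution above follows
 * IEEE 802.3 Table 28B-3, with Cap == ADVERTISE_PAUSE_CAP and
 * Asym == ADVERTISE_PAUSE_ASYM:
 *
 *	local Cap,        remote Cap          -> TX and RX pause
 *	local Cap + Asym, remote Asym only    -> RX pause only
 *	local Asym only,  remote Cap + Asym   -> TX pause only
 *	any other combination                 -> no pause frames
 */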
659 bnx2_5708s_linkup(struct bnx2 *bp)
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
682 bp->duplex = DUPLEX_HALF;
688 bnx2_5706s_linkup(struct bnx2 *bp)
690 u32 bmcr, local_adv, remote_adv, common;
693 bp->line_speed = SPEED_1000;
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
700 bp->duplex = DUPLEX_HALF;
703 if (!(bmcr & BMCR_ANENABLE)) {
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
717 bp->duplex = DUPLEX_HALF;
725 bnx2_copper_linkup(struct bnx2 *bp)
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
777 bp->line_speed = SPEED_10;
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
783 bp->duplex = DUPLEX_HALF;
791 bnx2_set_mac_link(struct bnx2 *bp)
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
809 switch (bp->line_speed) {
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
817 val |= BNX2_EMAC_MODE_PORT_MII;
820 val |= BNX2_EMAC_MODE_25G;
823 val |= BNX2_EMAC_MODE_PORT_GMII;
828 val |= BNX2_EMAC_MODE_PORT_GMII;
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
858 bnx2_set_link(struct bnx2 *bp)
863 if (bp->loopback == MAC_LOOPBACK) {
868 link_up = bp->link_up;
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
881 bmsr &= ~BMSR_LSTATUS;
884 if (bmsr & BMSR_LSTATUS) {
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
894 bnx2_copper_linkup(bp);
896 bnx2_resolve_flow_ctrl(bp);
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
905 if (!(bmcr & BMCR_ANENABLE)) {
906 bnx2_write_phy(bp, MII_BMCR, bmcr |
910 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
914 if (bp->link_up != link_up) {
915 bnx2_report_link(bp);
918 bnx2_set_mac_link(bp);
924 bnx2_reset_phy(struct bnx2 *bp)
929 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
931 #define PHY_RESET_MAX_WAIT 100
932 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
935 bnx2_read_phy(bp, MII_BMCR, &reg);
936 if (!(reg & BMCR_RESET)) {
941 if (i == PHY_RESET_MAX_WAIT) {
948 bnx2_phy_get_pause_adv(struct bnx2 *bp)
952 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
953 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
955 if (bp->phy_flags & PHY_SERDES_FLAG) {
956 adv = ADVERTISE_1000XPAUSE;
959 adv = ADVERTISE_PAUSE_CAP;
962 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
963 if (bp->phy_flags & PHY_SERDES_FLAG) {
964 adv = ADVERTISE_1000XPSE_ASYM;
967 adv = ADVERTISE_PAUSE_ASYM;
970 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
971 if (bp->phy_flags & PHY_SERDES_FLAG) {
972 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
975 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
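/* Annotation (not in the original source): summary of the mapping from
 * the requested flow control to the advertised pause bits above:
 *
 *	TX + RX -> PAUSE_CAP          (1000XPAUSE on SerDes)
 *	TX only -> PAUSE_ASYM         (1000XPSE_ASYM on SerDes)
 *	RX only -> PAUSE_CAP | ASYM   (1000XPAUSE | 1000XPSE_ASYM)
 */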
982 bnx2_setup_serdes_phy(struct bnx2 *bp)
987 if (!(bp->autoneg & AUTONEG_SPEED)) {
989 int force_link_down = 0;
991 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
992 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
993 if (up1 & BCM5708S_UP1_2G5) {
994 up1 &= ~BCM5708S_UP1_2G5;
995 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1000 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1001 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1003 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1004 new_bmcr = bmcr & ~BMCR_ANENABLE;
1005 new_bmcr |= BMCR_SPEED1000;
1006 if (bp->req_duplex == DUPLEX_FULL) {
1007 adv |= ADVERTISE_1000XFULL;
1008 new_bmcr |= BMCR_FULLDPLX;
1011 adv |= ADVERTISE_1000XHALF;
1012 new_bmcr &= ~BMCR_FULLDPLX;
1014 if ((new_bmcr != bmcr) || (force_link_down)) {
1015 /* Force a link down visible on the other side */
1017 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1018 ~(ADVERTISE_1000XFULL |
1019 ADVERTISE_1000XHALF));
1020 bnx2_write_phy(bp, MII_BMCR, bmcr |
1021 BMCR_ANRESTART | BMCR_ANENABLE);
1024 netif_carrier_off(bp->dev);
1025 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1027 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1028 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1033 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1034 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1035 up1 |= BCM5708S_UP1_2G5;
1036 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1039 if (bp->advertising & ADVERTISED_1000baseT_Full)
1040 new_adv |= ADVERTISE_1000XFULL;
1042 new_adv |= bnx2_phy_get_pause_adv(bp);
1044 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1045 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1047 bp->serdes_an_pending = 0;
1048 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1049 /* Force a link down visible on the other side */
1053 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1054 for (i = 0; i < 110; i++) {
1059 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1060 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1062 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1063 /* Speed up link-up time when the link partner
1064 * does not autonegotiate, which is very common
1065 * in blade servers. Some blade servers use
1066 * IPMI for keyboard input and it's important
1067 * to minimize link disruptions. Autoneg. involves
1068 * exchanging base pages plus 3 next pages and
1069 * normally completes in about 120 msec.
1071 bp->current_interval = SERDES_AN_TIMEOUT;
1072 bp->serdes_an_pending = 1;
1073 mod_timer(&bp->timer, jiffies + bp->current_interval);
1080 #define ETHTOOL_ALL_FIBRE_SPEED \
1081 (ADVERTISED_1000baseT_Full)
1083 #define ETHTOOL_ALL_COPPER_SPEED \
1084 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1085 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1086 ADVERTISED_1000baseT_Full)
1088 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1089 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1091 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1094 bnx2_setup_copper_phy(struct bnx2 *bp)
1099 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1101 if (bp->autoneg & AUTONEG_SPEED) {
1102 u32 adv_reg, adv1000_reg;
1103 u32 new_adv_reg = 0;
1104 u32 new_adv1000_reg = 0;
1106 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1107 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1108 ADVERTISE_PAUSE_ASYM);
1110 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1111 adv1000_reg &= PHY_ALL_1000_SPEED;
1113 if (bp->advertising & ADVERTISED_10baseT_Half)
1114 new_adv_reg |= ADVERTISE_10HALF;
1115 if (bp->advertising & ADVERTISED_10baseT_Full)
1116 new_adv_reg |= ADVERTISE_10FULL;
1117 if (bp->advertising & ADVERTISED_100baseT_Half)
1118 new_adv_reg |= ADVERTISE_100HALF;
1119 if (bp->advertising & ADVERTISED_100baseT_Full)
1120 new_adv_reg |= ADVERTISE_100FULL;
1121 if (bp->advertising & ADVERTISED_1000baseT_Full)
1122 new_adv1000_reg |= ADVERTISE_1000FULL;
1124 new_adv_reg |= ADVERTISE_CSMA;
1126 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1128 if ((adv1000_reg != new_adv1000_reg) ||
1129 (adv_reg != new_adv_reg) ||
1130 ((bmcr & BMCR_ANENABLE) == 0)) {
1132 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1133 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1134 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1137 else if (bp->link_up) {
1138 /* Flow ctrl may have changed from auto to forced */
1139 /* or vice-versa. */
1141 bnx2_resolve_flow_ctrl(bp);
1142 bnx2_set_mac_link(bp);
1148 if (bp->req_line_speed == SPEED_100) {
1149 new_bmcr |= BMCR_SPEED100;
1151 if (bp->req_duplex == DUPLEX_FULL) {
1152 new_bmcr |= BMCR_FULLDPLX;
1154 if (new_bmcr != bmcr) {
1158 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1159 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1161 if (bmsr & BMSR_LSTATUS) {
1162 /* Force link down */
1163 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1167 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1169 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1172 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1174 /* Normally, the new speed is set up after the link has
1175 * gone down and up again. In some cases, the link will not go
1176 * down, so we need to set up the new speed here.
1178 if (bmsr & BMSR_LSTATUS) {
1179 bp->line_speed = bp->req_line_speed;
1180 bp->duplex = bp->req_duplex;
1181 bnx2_resolve_flow_ctrl(bp);
1182 bnx2_set_mac_link(bp);
1189 bnx2_setup_phy(struct bnx2 *bp)
1191 if (bp->loopback == MAC_LOOPBACK)
1194 if (bp->phy_flags & PHY_SERDES_FLAG) {
1195 return (bnx2_setup_serdes_phy(bp));
1198 return (bnx2_setup_copper_phy(bp));
1203 bnx2_init_5708s_phy(struct bnx2 *bp)
1207 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1208 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1209 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1211 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1212 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1213 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1215 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1216 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1217 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1219 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1220 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1221 val |= BCM5708S_UP1_2G5;
1222 bnx2_write_phy(bp, BCM5708S_UP1, val);
1225 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1226 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1227 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1228 /* increase tx signal amplitude */
1229 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1230 BCM5708S_BLK_ADDR_TX_MISC);
1231 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1232 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1233 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1234 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1237 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1238 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1243 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1244 BNX2_SHARED_HW_CFG_CONFIG);
1245 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1247 BCM5708S_BLK_ADDR_TX_MISC);
1248 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1250 BCM5708S_BLK_ADDR_DIG);
1257 bnx2_init_5706s_phy(struct bnx2 *bp)
1259 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1261 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1262 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1265 if (bp->dev->mtu > 1500) {
1268 /* Set extended packet length bit */
1269 bnx2_write_phy(bp, 0x18, 0x7);
1270 bnx2_read_phy(bp, 0x18, &val);
1271 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1273 bnx2_write_phy(bp, 0x1c, 0x6c00);
1274 bnx2_read_phy(bp, 0x1c, &val);
1275 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1280 bnx2_write_phy(bp, 0x18, 0x7);
1281 bnx2_read_phy(bp, 0x18, &val);
1282 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1284 bnx2_write_phy(bp, 0x1c, 0x6c00);
1285 bnx2_read_phy(bp, 0x1c, &val);
1286 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1293 bnx2_init_copper_phy(struct bnx2 *bp)
1297 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1299 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1300 bnx2_write_phy(bp, 0x18, 0x0c00);
1301 bnx2_write_phy(bp, 0x17, 0x000a);
1302 bnx2_write_phy(bp, 0x15, 0x310b);
1303 bnx2_write_phy(bp, 0x17, 0x201f);
1304 bnx2_write_phy(bp, 0x15, 0x9506);
1305 bnx2_write_phy(bp, 0x17, 0x401f);
1306 bnx2_write_phy(bp, 0x15, 0x14e2);
1307 bnx2_write_phy(bp, 0x18, 0x0400);
1310 if (bp->dev->mtu > 1500) {
1311 /* Set extended packet length bit */
1312 bnx2_write_phy(bp, 0x18, 0x7);
1313 bnx2_read_phy(bp, 0x18, &val);
1314 bnx2_write_phy(bp, 0x18, val | 0x4000);
1316 bnx2_read_phy(bp, 0x10, &val);
1317 bnx2_write_phy(bp, 0x10, val | 0x1);
1320 bnx2_write_phy(bp, 0x18, 0x7);
1321 bnx2_read_phy(bp, 0x18, &val);
1322 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1324 bnx2_read_phy(bp, 0x10, &val);
1325 bnx2_write_phy(bp, 0x10, val & ~0x1);
1328 /* ethernet@wirespeed */
1329 bnx2_write_phy(bp, 0x18, 0x7007);
1330 bnx2_read_phy(bp, 0x18, &val);
1331 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1337 bnx2_init_phy(struct bnx2 *bp)
1342 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1343 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1345 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1349 bnx2_read_phy(bp, MII_PHYSID1, &val);
1350 bp->phy_id = val << 16;
1351 bnx2_read_phy(bp, MII_PHYSID2, &val);
1352 bp->phy_id |= val & 0xffff;
1354 if (bp->phy_flags & PHY_SERDES_FLAG) {
1355 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1356 rc = bnx2_init_5706s_phy(bp);
1357 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1358 rc = bnx2_init_5708s_phy(bp);
1361 rc = bnx2_init_copper_phy(bp);
1370 bnx2_set_mac_loopback(struct bnx2 *bp)
1374 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1375 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1376 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1377 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1382 static int bnx2_test_link(struct bnx2 *);
1385 bnx2_set_phy_loopback(struct bnx2 *bp)
1390 spin_lock_bh(&bp->phy_lock);
1391 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1393 spin_unlock_bh(&bp->phy_lock);
1397 for (i = 0; i < 10; i++) {
1398 if (bnx2_test_link(bp) == 0)
1403 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1404 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1405 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1406 BNX2_EMAC_MODE_25G);
1408 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1409 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1415 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1421 msg_data |= bp->fw_wr_seq;
1423 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1425 /* wait for an acknowledgement. */
1426 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1429 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1431 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1434 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1437 /* If we timed out, inform the firmware that this is the case. */
1438 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1440 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1443 msg_data &= ~BNX2_DRV_MSG_CODE;
1444 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1446 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1451 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
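/* Annotation (not in the original source): each bnx2_fw_sync() request
 * is stamped with an incrementing sequence number in the
 * BNX2_DRV_MSG_SEQ field of the shared-memory mailbox, and the
 * firmware acknowledges by echoing that sequence into BNX2_FW_MB.
 * A typical call from the suspend path looks like:
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
 */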
1458 bnx2_init_context(struct bnx2 *bp)
1464 u32 vcid_addr, pcid_addr, offset;
1468 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1471 vcid_addr = GET_PCID_ADDR(vcid);
1473 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1478 pcid_addr = GET_PCID_ADDR(new_vcid);
1481 vcid_addr = GET_CID_ADDR(vcid);
1482 pcid_addr = vcid_addr;
1485 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1486 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1488 /* Zero out the context. */
1489 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1490 CTX_WR(bp, 0x00, offset, 0);
1493 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1494 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1499 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1505 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1506 if (good_mbuf == NULL) {
1507 printk(KERN_ERR PFX "Failed to allocate memory in "
1508 "bnx2_alloc_bad_rbuf\n");
1512 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1513 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1517 /* Allocate a bunch of mbufs and save the good ones in an array. */
1518 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1519 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1520 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1522 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1524 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1526 /* The addresses with Bit 9 set are bad memory blocks. */
1527 if (!(val & (1 << 9))) {
1528 good_mbuf[good_mbuf_cnt] = (u16) val;
1532 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1535 /* Free the good ones back to the mbuf pool, thus discarding
1536 * all the bad ones. */
1537 while (good_mbuf_cnt) {
1540 val = good_mbuf[good_mbuf_cnt];
1541 val = (val << 9) | val | 1;
1543 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
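/* Annotation (not in the original source): the word written to
 * BNX2_RBUF_FW_BUF_FREE packs the 9-bit cluster index into both halves
 * plus a valid bit, i.e. (val << 9) | val | 1.  For example, index
 * 0x25 is written back as (0x25 << 9) | 0x25 | 1 == 0x4a25.
 */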
1550 bnx2_set_mac_addr(struct bnx2 *bp)
1553 u8 *mac_addr = bp->dev->dev_addr;
1555 val = (mac_addr[0] << 8) | mac_addr[1];
1557 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1559 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1560 (mac_addr[4] << 8) | mac_addr[5];
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
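/* Annotation (not in the original source): the six address octets are
 * packed big-endian across the two match registers.  For the address
 * 00:10:18:aa:bb:cc, MATCH0 holds 0x00000010 (octets 0-1) and MATCH1
 * holds 0x18aabbcc (octets 2-5).
 */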
1566 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1568 struct sk_buff *skb;
1569 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1571 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1572 unsigned long align;
1574 skb = dev_alloc_skb(bp->rx_buf_size);
1579 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1580 skb_reserve(skb, 8 - align);
1584 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1585 PCI_DMA_FROMDEVICE);
1588 pci_unmap_addr_set(rx_buf, mapping, mapping);
1590 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1591 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1593 bp->rx_prod_bseq += bp->rx_buf_use_size;
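/* Annotation (not in the original source): the skb_reserve() above
 * rounds skb->data up to the next 8-byte boundary so the buffer handed
 * to the chip is aligned for DMA; e.g. data ending in 0x3 reserves
 * 8 - 3 == 5 bytes.  The 64-bit DMA address is then split across the
 * haddr_hi/haddr_lo halves of the rx BD.
 */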
1599 bnx2_phy_int(struct bnx2 *bp)
1601 u32 new_link_state, old_link_state;
1603 new_link_state = bp->status_blk->status_attn_bits &
1604 STATUS_ATTN_BITS_LINK_STATE;
1605 old_link_state = bp->status_blk->status_attn_bits_ack &
1606 STATUS_ATTN_BITS_LINK_STATE;
1607 if (new_link_state != old_link_state) {
1608 if (new_link_state) {
1609 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1610 STATUS_ATTN_BITS_LINK_STATE);
1613 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1614 STATUS_ATTN_BITS_LINK_STATE);
1621 bnx2_tx_int(struct bnx2 *bp)
1623 struct status_block *sblk = bp->status_blk;
1624 u16 hw_cons, sw_cons, sw_ring_cons;
1627 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1628 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1631 sw_cons = bp->tx_cons;
1633 while (sw_cons != hw_cons) {
1634 struct sw_bd *tx_buf;
1635 struct sk_buff *skb;
1638 sw_ring_cons = TX_RING_IDX(sw_cons);
1640 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1643 /* partial BD completions possible with TSO packets */
1644 if (skb_is_gso(skb)) {
1645 u16 last_idx, last_ring_idx;
1647 last_idx = sw_cons +
1648 skb_shinfo(skb)->nr_frags + 1;
1649 last_ring_idx = sw_ring_cons +
1650 skb_shinfo(skb)->nr_frags + 1;
1651 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1654 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1659 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1660 skb_headlen(skb), PCI_DMA_TODEVICE);
1663 last = skb_shinfo(skb)->nr_frags;
1665 for (i = 0; i < last; i++) {
1666 sw_cons = NEXT_TX_BD(sw_cons);
1668 pci_unmap_page(bp->pdev,
1670 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1672 skb_shinfo(skb)->frags[i].size,
1676 sw_cons = NEXT_TX_BD(sw_cons);
1678 tx_free_bd += last + 1;
1682 hw_cons = bp->hw_tx_cons =
1683 sblk->status_tx_quick_consumer_index0;
1685 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1690 bp->tx_cons = sw_cons;
1691 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1692 * before checking for netif_queue_stopped(). Without the
1693 * memory barrier, there is a small possibility that bnx2_start_xmit()
1694 * will miss it and cause the queue to be stopped forever.
1698 if (unlikely(netif_queue_stopped(bp->dev)) &&
1699 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1700 netif_tx_lock(bp->dev);
1701 if ((netif_queue_stopped(bp->dev)) &&
1702 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1703 netif_wake_queue(bp->dev);
1704 netif_tx_unlock(bp->dev);
1709 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1712 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1713 struct rx_bd *cons_bd, *prod_bd;
1715 cons_rx_buf = &bp->rx_buf_ring[cons];
1716 prod_rx_buf = &bp->rx_buf_ring[prod];
1718 pci_dma_sync_single_for_device(bp->pdev,
1719 pci_unmap_addr(cons_rx_buf, mapping),
1720 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1722 bp->rx_prod_bseq += bp->rx_buf_use_size;
1724 prod_rx_buf->skb = skb;
1729 pci_unmap_addr_set(prod_rx_buf, mapping,
1730 pci_unmap_addr(cons_rx_buf, mapping));
1732 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1733 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1734 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1735 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1739 bnx2_rx_int(struct bnx2 *bp, int budget)
1741 struct status_block *sblk = bp->status_blk;
1742 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1743 struct l2_fhdr *rx_hdr;
1746 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1747 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1750 sw_cons = bp->rx_cons;
1751 sw_prod = bp->rx_prod;
1753 /* Memory barrier necessary as speculative reads of the rx
1754 * buffer can be ahead of the index in the status block
1757 while (sw_cons != hw_cons) {
1760 struct sw_bd *rx_buf;
1761 struct sk_buff *skb;
1762 dma_addr_t dma_addr;
1764 sw_ring_cons = RX_RING_IDX(sw_cons);
1765 sw_ring_prod = RX_RING_IDX(sw_prod);
1767 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1772 dma_addr = pci_unmap_addr(rx_buf, mapping);
1774 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1775 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1777 rx_hdr = (struct l2_fhdr *) skb->data;
1778 len = rx_hdr->l2_fhdr_pkt_len - 4;
1780 if ((status = rx_hdr->l2_fhdr_status) &
1781 (L2_FHDR_ERRORS_BAD_CRC |
1782 L2_FHDR_ERRORS_PHY_DECODE |
1783 L2_FHDR_ERRORS_ALIGNMENT |
1784 L2_FHDR_ERRORS_TOO_SHORT |
1785 L2_FHDR_ERRORS_GIANT_FRAME)) {
1790 /* Since we don't have a jumbo ring, copy small packets
1793 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1794 struct sk_buff *new_skb;
1796 new_skb = dev_alloc_skb(len + 2);
1797 if (new_skb == NULL)
1801 memcpy(new_skb->data,
1802 skb->data + bp->rx_offset - 2,
1805 skb_reserve(new_skb, 2);
1806 skb_put(new_skb, len);
1807 new_skb->dev = bp->dev;
1809 bnx2_reuse_rx_skb(bp, skb,
1810 sw_ring_cons, sw_ring_prod);
1814 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1815 pci_unmap_single(bp->pdev, dma_addr,
1816 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1818 skb_reserve(skb, bp->rx_offset);
1823 bnx2_reuse_rx_skb(bp, skb,
1824 sw_ring_cons, sw_ring_prod);
1828 skb->protocol = eth_type_trans(skb, bp->dev);
1830 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1831 (ntohs(skb->protocol) != 0x8100)) {
1838 skb->ip_summed = CHECKSUM_NONE;
1840 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1841 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1843 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1844 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1845 skb->ip_summed = CHECKSUM_UNNECESSARY;
1849 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1850 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1851 rx_hdr->l2_fhdr_vlan_tag);
1855 netif_receive_skb(skb);
1857 bp->dev->last_rx = jiffies;
1861 sw_cons = NEXT_RX_BD(sw_cons);
1862 sw_prod = NEXT_RX_BD(sw_prod);
1864 if (rx_pkt == budget)
1867 /* Refresh hw_cons to see if there is new work */
1868 if (sw_cons == hw_cons) {
1869 hw_cons = bp->hw_rx_cons =
1870 sblk->status_rx_quick_consumer_index0;
1871 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1876 bp->rx_cons = sw_cons;
1877 bp->rx_prod = sw_prod;
1879 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1881 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1889 /* MSI ISR - The only difference between this and the INTx ISR
1890 * is that the MSI interrupt is always serviced.
1893 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1895 struct net_device *dev = dev_instance;
1896 struct bnx2 *bp = netdev_priv(dev);
1898 prefetch(bp->status_blk);
1899 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1900 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1901 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1903 /* Return here if interrupt is disabled. */
1904 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1907 netif_rx_schedule(dev);
1913 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1915 struct net_device *dev = dev_instance;
1916 struct bnx2 *bp = netdev_priv(dev);
1918 /* When using INTx, it is possible for the interrupt to arrive
1919 * at the CPU before the status block posted prior to the
1920 * interrupt. Reading a register will flush the status block.
1921 * When using MSI, the MSI message will always complete after
1922 * the status block write.
1924 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1925 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1926 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1929 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1930 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1931 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1933 /* Return here if interrupt is shared and is disabled. */
1934 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1937 netif_rx_schedule(dev);
1943 bnx2_has_work(struct bnx2 *bp)
1945 struct status_block *sblk = bp->status_blk;
1947 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1948 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1951 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1959 bnx2_poll(struct net_device *dev, int *budget)
1961 struct bnx2 *bp = netdev_priv(dev);
1963 if ((bp->status_blk->status_attn_bits &
1964 STATUS_ATTN_BITS_LINK_STATE) !=
1965 (bp->status_blk->status_attn_bits_ack &
1966 STATUS_ATTN_BITS_LINK_STATE)) {
1968 spin_lock(&bp->phy_lock);
1970 spin_unlock(&bp->phy_lock);
1972 /* This is needed to take care of transient status
1973 * during link changes.
1975 REG_WR(bp, BNX2_HC_COMMAND,
1976 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1977 REG_RD(bp, BNX2_HC_COMMAND);
1980 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1983 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1984 int orig_budget = *budget;
1987 if (orig_budget > dev->quota)
1988 orig_budget = dev->quota;
1990 work_done = bnx2_rx_int(bp, orig_budget);
1991 *budget -= work_done;
1992 dev->quota -= work_done;
1995 bp->last_status_idx = bp->status_blk->status_idx;
1998 if (!bnx2_has_work(bp)) {
1999 netif_rx_complete(dev);
2000 if (likely(bp->flags & USING_MSI_FLAG)) {
2001 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2002 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2003 bp->last_status_idx);
2006 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2007 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2008 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2009 bp->last_status_idx);
2011 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2012 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2013 bp->last_status_idx);
2020 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2021 * from set_multicast.
2024 bnx2_set_rx_mode(struct net_device *dev)
2026 struct bnx2 *bp = netdev_priv(dev);
2027 u32 rx_mode, sort_mode;
2030 spin_lock_bh(&bp->phy_lock);
2032 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2033 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2034 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2036 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2037 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2039 if (!(bp->flags & ASF_ENABLE_FLAG))
2040 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2042 if (dev->flags & IFF_PROMISC) {
2043 /* Promiscuous mode. */
2044 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2045 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2047 else if (dev->flags & IFF_ALLMULTI) {
2048 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2049 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2052 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2055 /* Accept one or more multicast(s). */
2056 struct dev_mc_list *mclist;
2057 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2062 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2064 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2065 i++, mclist = mclist->next) {
2067 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2069 regidx = (bit & 0xe0) >> 5;
2071 mc_filter[regidx] |= (1 << bit);
2074 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2075 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2079 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
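/* Annotation (not in the original source): the multicast filter is a
 * 256-bit hash spread across NUM_MC_HASH_REGISTERS 32-bit registers.
 * A byte of the little-endian CRC of the address selects the bit: its
 * top three bits pick the register (regidx) and the low five bits pick
 * the bit within it, e.g. a CRC byte of 0xe7 sets bit 7 of register 7.
 */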
2082 if (rx_mode != bp->rx_mode) {
2083 bp->rx_mode = rx_mode;
2084 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2087 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2088 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2089 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2091 spin_unlock_bh(&bp->phy_lock);
2094 #define FW_BUF_SIZE 0x8000
2097 bnx2_gunzip_init(struct bnx2 *bp)
2099 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2102 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2105 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2106 if (bp->strm->workspace == NULL)
2116 vfree(bp->gunzip_buf);
2117 bp->gunzip_buf = NULL;
2120 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2121 "uncompression.\n", bp->dev->name);
2126 bnx2_gunzip_end(struct bnx2 *bp)
2128 kfree(bp->strm->workspace);
2133 if (bp->gunzip_buf) {
2134 vfree(bp->gunzip_buf);
2135 bp->gunzip_buf = NULL;
2140 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2144 /* check gzip header */
2145 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2151 if (zbuf[3] & FNAME)
2152 while ((zbuf[n++] != 0) && (n < len));
2154 bp->strm->next_in = zbuf + n;
2155 bp->strm->avail_in = len - n;
2156 bp->strm->next_out = bp->gunzip_buf;
2157 bp->strm->avail_out = FW_BUF_SIZE;
2159 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2163 rc = zlib_inflate(bp->strm, Z_FINISH);
2165 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2166 *outbuf = bp->gunzip_buf;
2168 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2169 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2170 bp->dev->name, bp->strm->msg);
2172 zlib_inflateEnd(bp->strm);
2174 if (rc == Z_STREAM_END)
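/* Annotation (not in the original source): the embedded firmware images
 * are gzip streams, so after validating the magic bytes (0x1f 0x8b,
 * deflate method) and skipping the optional FNAME field, the raw
 * deflate payload is inflated with a negative window size (-MAX_WBITS)
 * to suppress zlib's own header processing.
 */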
2181 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2188 for (i = 0; i < rv2p_code_len; i += 8) {
2189 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2191 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2194 if (rv2p_proc == RV2P_PROC1) {
2195 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2196 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2199 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2200 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2204 /* Reset the processor; un-stall is done later. */
2205 if (rv2p_proc == RV2P_PROC1) {
2206 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2209 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2214 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2220 val = REG_RD_IND(bp, cpu_reg->mode);
2221 val |= cpu_reg->mode_value_halt;
2222 REG_WR_IND(bp, cpu_reg->mode, val);
2223 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2225 /* Load the Text area. */
2226 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2230 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2231 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2235 /* Load the Data area. */
2236 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2240 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2241 REG_WR_IND(bp, offset, fw->data[j]);
2245 /* Load the SBSS area. */
2246 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2250 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2251 REG_WR_IND(bp, offset, fw->sbss[j]);
2255 /* Load the BSS area. */
2256 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2260 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2261 REG_WR_IND(bp, offset, fw->bss[j]);
2265 /* Load the Read-Only area. */
2266 offset = cpu_reg->spad_base +
2267 (fw->rodata_addr - cpu_reg->mips_view_base);
2271 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2272 REG_WR_IND(bp, offset, fw->rodata[j]);
2276 /* Clear the pre-fetch instruction. */
2277 REG_WR_IND(bp, cpu_reg->inst, 0);
2278 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2280 /* Start the CPU. */
2281 val = REG_RD_IND(bp, cpu_reg->mode);
2282 val &= ~cpu_reg->mode_value_halt;
2283 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2284 REG_WR_IND(bp, cpu_reg->mode, val);
2288 bnx2_init_cpus(struct bnx2 *bp)
2290 struct cpu_reg cpu_reg;
2296 if ((rc = bnx2_gunzip_init(bp)) != 0)
2299 /* Initialize the RV2P processor. */
2300 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2305 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2307 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2312 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2314 /* Initialize the RX Processor. */
2315 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2316 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2317 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2318 cpu_reg.state = BNX2_RXP_CPU_STATE;
2319 cpu_reg.state_value_clear = 0xffffff;
2320 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2321 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2322 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2323 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2324 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2325 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2326 cpu_reg.mips_view_base = 0x8000000;
2328 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2329 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2330 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2331 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2333 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2334 fw.text_len = bnx2_RXP_b06FwTextLen;
2337 rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2344 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2345 fw.data_len = bnx2_RXP_b06FwDataLen;
2347 fw.data = bnx2_RXP_b06FwData;
2349 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2350 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2352 fw.sbss = bnx2_RXP_b06FwSbss;
2354 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2355 fw.bss_len = bnx2_RXP_b06FwBssLen;
2357 fw.bss = bnx2_RXP_b06FwBss;
2359 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2360 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2361 fw.rodata_index = 0;
2362 fw.rodata = bnx2_RXP_b06FwRodata;
2364 load_cpu_fw(bp, &cpu_reg, &fw);
2366 /* Initialize the TX Processor. */
2367 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2368 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2369 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2370 cpu_reg.state = BNX2_TXP_CPU_STATE;
2371 cpu_reg.state_value_clear = 0xffffff;
2372 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2373 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2374 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2375 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2376 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2377 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2378 cpu_reg.mips_view_base = 0x8000000;
2380 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2381 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2382 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2383 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2385 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2386 fw.text_len = bnx2_TXP_b06FwTextLen;
2389 rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2396 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2397 fw.data_len = bnx2_TXP_b06FwDataLen;
2399 fw.data = bnx2_TXP_b06FwData;
2401 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2402 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2404 fw.sbss = bnx2_TXP_b06FwSbss;
2406 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2407 fw.bss_len = bnx2_TXP_b06FwBssLen;
2409 fw.bss = bnx2_TXP_b06FwBss;
2411 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2412 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2413 fw.rodata_index = 0;
2414 fw.rodata = bnx2_TXP_b06FwRodata;
2416 load_cpu_fw(bp, &cpu_reg, &fw);
2418 /* Initialize the TX Patch-up Processor. */
2419 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2420 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2421 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2422 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2423 cpu_reg.state_value_clear = 0xffffff;
2424 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2425 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2426 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2427 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2428 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2429 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2430 cpu_reg.mips_view_base = 0x8000000;
2432 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2433 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2434 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2435 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2437 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2438 fw.text_len = bnx2_TPAT_b06FwTextLen;
2441 rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2448 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2449 fw.data_len = bnx2_TPAT_b06FwDataLen;
2451 fw.data = bnx2_TPAT_b06FwData;
2453 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2454 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2456 fw.sbss = bnx2_TPAT_b06FwSbss;
2458 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2459 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2461 fw.bss = bnx2_TPAT_b06FwBss;
2463 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2464 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2465 fw.rodata_index = 0;
2466 fw.rodata = bnx2_TPAT_b06FwRodata;
2468 load_cpu_fw(bp, &cpu_reg, &fw);
2470 /* Initialize the Completion Processor. */
2471 cpu_reg.mode = BNX2_COM_CPU_MODE;
2472 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2473 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2474 cpu_reg.state = BNX2_COM_CPU_STATE;
2475 cpu_reg.state_value_clear = 0xffffff;
2476 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2477 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2478 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2479 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2480 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2481 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2482 cpu_reg.mips_view_base = 0x8000000;
2484 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2485 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2486 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2487 fw.start_addr = bnx2_COM_b06FwStartAddr;
2489 fw.text_addr = bnx2_COM_b06FwTextAddr;
2490 fw.text_len = bnx2_COM_b06FwTextLen;
2493 rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2500 fw.data_addr = bnx2_COM_b06FwDataAddr;
2501 fw.data_len = bnx2_COM_b06FwDataLen;
2503 fw.data = bnx2_COM_b06FwData;
2505 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2506 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2508 fw.sbss = bnx2_COM_b06FwSbss;
2510 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2511 fw.bss_len = bnx2_COM_b06FwBssLen;
2513 fw.bss = bnx2_COM_b06FwBss;
2515 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2516 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2517 fw.rodata_index = 0;
2518 fw.rodata = bnx2_COM_b06FwRodata;
2520 load_cpu_fw(bp, &cpu_reg, &fw);
2523 bnx2_gunzip_end(bp);
2528 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2532 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2534 switch (state) {
2535 case PCI_D0: {
2536 u32 val;
2538 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2539 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2540 PCI_PM_CTRL_PME_STATUS);
2542 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2543 /* delay required during transition out of D3hot */
2544 msleep(20);
2546 val = REG_RD(bp, BNX2_EMAC_MODE);
2547 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2548 val &= ~BNX2_EMAC_MODE_MPKT;
2549 REG_WR(bp, BNX2_EMAC_MODE, val);
2551 val = REG_RD(bp, BNX2_RPM_CONFIG);
2552 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2553 REG_WR(bp, BNX2_RPM_CONFIG, val);
2554 break;
2555 }
2556 case PCI_D3hot: {
2557 int i;
2558 u32 val, wol_msg;
2560 if (bp->wol) {
2561 u32 advertising;
2562 u8 autoneg;
2564 autoneg = bp->autoneg;
2565 advertising = bp->advertising;
2567 bp->autoneg = AUTONEG_SPEED;
2568 bp->advertising = ADVERTISED_10baseT_Half |
2569 ADVERTISED_10baseT_Full |
2570 ADVERTISED_100baseT_Half |
2571 ADVERTISED_100baseT_Full |
2572 ADVERTISED_Autoneg;
2574 bnx2_setup_copper_phy(bp);
2576 bp->autoneg = autoneg;
2577 bp->advertising = advertising;
2579 bnx2_set_mac_addr(bp);
2581 val = REG_RD(bp, BNX2_EMAC_MODE);
2583 /* Enable port mode. */
2584 val &= ~BNX2_EMAC_MODE_PORT;
2585 val |= BNX2_EMAC_MODE_PORT_MII |
2586 BNX2_EMAC_MODE_MPKT_RCVD |
2587 BNX2_EMAC_MODE_ACPI_RCVD |
2588 BNX2_EMAC_MODE_MPKT;
2590 REG_WR(bp, BNX2_EMAC_MODE, val);
2592 /* receive all multicast */
2593 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2594 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2595 0xffffffff);
2596 }
2597 REG_WR(bp, BNX2_EMAC_RX_MODE,
2598 BNX2_EMAC_RX_MODE_SORT_MODE);
2600 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2601 BNX2_RPM_SORT_USER0_MC_EN;
2602 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2603 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2604 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2605 BNX2_RPM_SORT_USER0_ENA);
2607 /* Need to enable EMAC and RPM for WOL. */
2608 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2609 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2610 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2611 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2613 val = REG_RD(bp, BNX2_RPM_CONFIG);
2614 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2615 REG_WR(bp, BNX2_RPM_CONFIG, val);
2617 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2618 }
2619 else {
2620 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2621 }
2623 if (!(bp->flags & NO_WOL_FLAG))
2624 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2626 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2627 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2628 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2637 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2639 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2640 pmcsr);
2642 /* No more memory access after this point until
2643 * device is brought back to D0.
2644 */
2645 udelay(50);
2646 break;
2647 }
2648 default:
2649 return -EINVAL;
2650 }
2651 return 0;
2652 }
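/* All NVRAM traffic follows a fixed bracket around the dword-level
 * helpers below.  Illustrative sketch of a caller (error handling
 * elided; this mirrors what bnx2_nvram_read() does):
 *
 *	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
 *		return rc;
 *	bnx2_enable_nvram_access(bp);
 *	... bnx2_nvram_read_dword() / bnx2_nvram_write_dword() ...
 *	bnx2_disable_nvram_access(bp);
 *	bnx2_release_nvram_lock(bp);
 */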
2654 static int
2655 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2656 {
2657 u32 val;
2658 int j;
2660 /* Request access to the flash interface. */
2661 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2662 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2663 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2664 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2665 break;
2667 udelay(5);
2668 }
2670 if (j >= NVRAM_TIMEOUT_COUNT)
2671 return -EBUSY;
2673 return 0;
2674 }
2676 static int
2677 bnx2_release_nvram_lock(struct bnx2 *bp)
2678 {
2679 int j;
2680 u32 val;
2682 /* Relinquish nvram interface. */
2683 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2685 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2686 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2687 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2688 break;
2690 udelay(5);
2691 }
2693 if (j >= NVRAM_TIMEOUT_COUNT)
2694 return -EBUSY;
2696 return 0;
2697 }
2700 static int
2701 bnx2_enable_nvram_write(struct bnx2 *bp)
2702 {
2703 u32 val;
2705 val = REG_RD(bp, BNX2_MISC_CFG);
2706 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2708 if (!bp->flash_info->buffered) {
2711 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2712 REG_WR(bp, BNX2_NVM_COMMAND,
2713 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2715 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2718 val = REG_RD(bp, BNX2_NVM_COMMAND);
2719 if (val & BNX2_NVM_COMMAND_DONE)
2720 break;
2721 }
2723 if (j >= NVRAM_TIMEOUT_COUNT)
2724 return -EBUSY;
2725 }
2726 return 0;
2727 }
2729 static void
2730 bnx2_disable_nvram_write(struct bnx2 *bp)
2731 {
2732 u32 val;
2734 val = REG_RD(bp, BNX2_MISC_CFG);
2735 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2736 }
2739 static void
2740 bnx2_enable_nvram_access(struct bnx2 *bp)
2741 {
2742 u32 val;
2744 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2745 /* Enable both bits, even on read. */
2746 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2747 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2748 }
2750 static void
2751 bnx2_disable_nvram_access(struct bnx2 *bp)
2752 {
2753 u32 val;
2755 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2756 /* Disable both bits, even after read. */
2757 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2758 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2759 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2760 }
2762 static int
2763 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2764 {
2765 u32 cmd;
2766 int j;
2768 if (bp->flash_info->buffered)
2769 /* Buffered flash, no erase needed */
2770 return 0;
2772 /* Build an erase command */
2773 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2774 BNX2_NVM_COMMAND_DOIT;
2776 /* Need to clear DONE bit separately. */
2777 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2779 /* Address of the NVRAM page to erase. */
2780 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2782 /* Issue an erase command. */
2783 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2785 /* Wait for completion. */
2786 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2791 val = REG_RD(bp, BNX2_NVM_COMMAND);
2792 if (val & BNX2_NVM_COMMAND_DONE)
2793 break;
2794 }
2796 if (j >= NVRAM_TIMEOUT_COUNT)
2797 return -EBUSY;
2799 return 0;
2800 }
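/* For buffered flash parts, a linear offset must be translated into
 * the controller's page/byte format: the page number is shifted up by
 * page_bits and the byte offset within the page fills the low bits.
 * Worked example with assumed parameters page_size = 264 and
 * page_bits = 9 (see flash_table for the real per-device values):
 * offset 1000 -> page 3, byte 208 -> (3 << 9) + 208 = 1744.
 */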
2802 static int
2803 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2804 {
2805 u32 cmd;
2806 int j;
2808 /* Build the command word. */
2809 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2811 /* Convert the offset to the buffered flash's page/byte address format. */
2812 if (bp->flash_info->buffered) {
2813 offset = ((offset / bp->flash_info->page_size) <<
2814 bp->flash_info->page_bits) +
2815 (offset % bp->flash_info->page_size);
2818 /* Need to clear DONE bit separately. */
2819 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2821 /* Address of the NVRAM to read from. */
2822 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2824 /* Issue a read command. */
2825 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2827 /* Wait for completion. */
2828 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2833 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834 if (val & BNX2_NVM_COMMAND_DONE) {
2835 val = REG_RD(bp, BNX2_NVM_READ);
2837 val = be32_to_cpu(val);
2838 memcpy(ret_val, &val, 4);
2839 break;
2840 }
2842 if (j >= NVRAM_TIMEOUT_COUNT)
2843 return -EBUSY;
2845 return 0;
2846 }
2849 static int
2850 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2851 {
2852 u32 cmd, val32;
2853 int j;
2855 /* Build the command word. */
2856 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2858 /* Convert the offset to the buffered flash's page/byte address format. */
2859 if (bp->flash_info->buffered) {
2860 offset = ((offset / bp->flash_info->page_size) <<
2861 bp->flash_info->page_bits) +
2862 (offset % bp->flash_info->page_size);
2865 /* Need to clear DONE bit separately. */
2866 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2868 memcpy(&val32, val, 4);
2869 val32 = cpu_to_be32(val32);
2871 /* Write the data. */
2872 REG_WR(bp, BNX2_NVM_WRITE, val32);
2874 /* Address of the NVRAM to write to. */
2875 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2877 /* Issue the write command. */
2878 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2880 /* Wait for completion. */
2881 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2884 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2885 break;
2886 }
2887 if (j >= NVRAM_TIMEOUT_COUNT)
2888 return -EBUSY;
2890 return 0;
2891 }
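/* bnx2_init_nvram() identifies the attached flash/EEPROM.  Bit 30 of
 * NVM_CFG1 says whether the interface was already reconfigured (match
 * the table on config1); otherwise the strapping pins are matched
 * against flash_table, using the backup strap mask when bit 23 is set.
 */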
2893 static int
2894 bnx2_init_nvram(struct bnx2 *bp)
2895 {
2896 u32 val;
2897 int j, entry_count, rc;
2898 struct flash_spec *flash;
2900 /* Determine the selected interface. */
2901 val = REG_RD(bp, BNX2_NVM_CFG1);
2903 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2906 if (val & 0x40000000) {
2908 /* Flash interface has been reconfigured */
2909 for (j = 0, flash = &flash_table[0]; j < entry_count;
2910 j++, flash++) {
2911 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2912 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2913 bp->flash_info = flash;
2914 break;
2915 }
2916 }
2917 }
2918 else {
2919 u32 mask;
2920 /* Flash interface has not yet been reconfigured */
2922 if (val & (1 << 23))
2923 mask = FLASH_BACKUP_STRAP_MASK;
2924 else
2925 mask = FLASH_STRAP_MASK;
2927 for (j = 0, flash = &flash_table[0]; j < entry_count;
2928 j++, flash++) {
2930 if ((val & mask) == (flash->strapping & mask)) {
2931 bp->flash_info = flash;
2933 /* Request access to the flash interface. */
2934 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2935 return rc;
2937 /* Enable access to flash interface */
2938 bnx2_enable_nvram_access(bp);
2940 /* Reconfigure the flash interface */
2941 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2942 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2943 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2944 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2946 /* Disable access to flash interface */
2947 bnx2_disable_nvram_access(bp);
2948 bnx2_release_nvram_lock(bp);
2950 break;
2951 }
2952 }
2953 } /* if (val & 0x40000000) */
2955 if (j == entry_count) {
2956 bp->flash_info = NULL;
2957 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2958 return -ENODEV;
2959 }
2961 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2962 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2963 if (val)
2964 bp->flash_size = val;
2965 else
2966 bp->flash_size = bp->flash_info->total_size;
2968 return rc;
2969 }
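/* Example use of bnx2_nvram_read() (illustrative; this mirrors
 * bnx2_test_nvram() further below), reading the 4-byte image magic at
 * offset 0:
 *
 *	u32 magic;
 *	if (bnx2_nvram_read(bp, 0, (u8 *) &magic, 4) == 0 &&
 *	    be32_to_cpu(magic) == 0x669955aa)
 *		... valid NVRAM image ...
 */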
2971 static int
2972 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2973 int buf_size)
2974 {
2975 int rc = 0;
2976 u32 cmd_flags, offset32, len32, extra;
2981 /* Request access to the flash interface. */
2982 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2983 return rc;
2985 /* Enable access to flash interface */
2986 bnx2_enable_nvram_access(bp);
2988 len32 = buf_size;
2989 offset32 = offset;
2990 extra = 0;
2992 cmd_flags = 0;
2994 if (offset32 & 3) {
2995 u8 buf[4];
2996 u32 pre_len;
2998 offset32 &= ~3;
2999 pre_len = 4 - (offset & 3);
3001 if (pre_len >= len32) {
3002 pre_len = len32;
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3004 BNX2_NVM_COMMAND_LAST;
3005 }
3006 else {
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3008 }
3010 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3012 if (rc)
3013 return rc;
3015 memcpy(ret_buf, buf + (offset & 3), pre_len);
3017 offset32 += 4;
3018 ret_buf += pre_len;
3019 len32 -= pre_len;
3020 }
3021 if (len32 & 3) {
3022 extra = 4 - (len32 & 3);
3023 len32 = (len32 + 4) & ~3;
3024 }
3026 if (len32 == 4) {
3027 u8 buf[4];
3029 if (cmd_flags)
3030 cmd_flags = BNX2_NVM_COMMAND_LAST;
3031 else
3032 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3033 BNX2_NVM_COMMAND_LAST;
3035 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3037 memcpy(ret_buf, buf, 4 - extra);
3038 }
3039 else if (len32 > 0) {
3040 u8 buf[4];
3042 /* Read the first word. */
3043 if (cmd_flags)
3044 cmd_flags = 0;
3045 else
3046 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3048 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3050 /* Advance to the next dword. */
3051 offset32 += 4;
3052 ret_buf += 4;
3053 len32 -= 4;
3055 while (len32 > 4 && rc == 0) {
3056 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3058 /* Advance to the next dword. */
3059 offset32 += 4;
3060 ret_buf += 4;
3061 len32 -= 4;
3062 }
3064 if (rc)
3065 return rc;
3067 cmd_flags = BNX2_NVM_COMMAND_LAST;
3068 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3070 memcpy(ret_buf, buf, 4 - extra);
3071 }
3073 /* Disable access to flash interface */
3074 bnx2_disable_nvram_access(bp);
3076 bnx2_release_nvram_lock(bp);
3078 return rc;
3079 }
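/* bnx2_nvram_write() performs a dword read-modify-write: unaligned
 * leading/trailing bytes are first read back into start[]/end[] and
 * merged with the caller's data, and on non-buffered parts each
 * affected page is read into flash_buffer, erased, and rewritten in
 * full before moving to the next page.
 */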
3081 static int
3082 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3083 int buf_size)
3084 {
3085 u32 written, offset32, len32;
3086 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3088 int align_start, align_end;
3090 buf = data_buf;
3091 offset32 = offset;
3092 len32 = buf_size;
3093 align_start = align_end = 0;
3095 if ((align_start = (offset32 & 3))) {
3096 offset32 &= ~3;
3097 len32 += align_start;
3098 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3099 return rc;
3100 }
3102 if (len32 & 3) {
3103 if ((len32 > 4) || !align_start) {
3104 align_end = 4 - (len32 & 3);
3105 len32 += align_end;
3106 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3107 end, 4)))
3108 return rc;
3109 }
3110 }
3113 if (align_start || align_end) {
3114 buf = kmalloc(len32, GFP_KERNEL);
3115 if (buf == 0)
3116 return -ENOMEM;
3117 if (align_start) {
3118 memcpy(buf, start, 4);
3119 }
3120 if (align_end) {
3121 memcpy(buf + len32 - 4, end, 4);
3122 }
3123 memcpy(buf + align_start, data_buf, buf_size);
3124 }
3126 if (bp->flash_info->buffered == 0) {
3127 flash_buffer = kmalloc(264, GFP_KERNEL);
3128 if (flash_buffer == NULL) {
3129 rc = -ENOMEM;
3130 goto nvram_write_end;
3131 }
3132 }
3134 written = 0;
3135 while ((written < len32) && (rc == 0)) {
3136 u32 page_start, page_end, data_start, data_end;
3137 u32 addr, cmd_flags;
3140 /* Find the page_start addr */
3141 page_start = offset32 + written;
3142 page_start -= (page_start % bp->flash_info->page_size);
3143 /* Find the page_end addr */
3144 page_end = page_start + bp->flash_info->page_size;
3145 /* Find the data_start addr */
3146 data_start = (written == 0) ? offset32 : page_start;
3147 /* Find the data_end addr */
3148 data_end = (page_end > offset32 + len32) ?
3149 (offset32 + len32) : page_end;
3151 /* Request access to the flash interface. */
3152 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3153 goto nvram_write_end;
3155 /* Enable access to flash interface */
3156 bnx2_enable_nvram_access(bp);
3158 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3159 if (bp->flash_info->buffered == 0) {
3162 /* Read the whole page into the buffer
3163 * (non-buffered flash only) */
3164 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3165 if (j == (bp->flash_info->page_size - 4)) {
3166 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3167 }
3168 rc = bnx2_nvram_read_dword(bp,
3169 page_start + j,
3170 &flash_buffer[j],
3171 cmd_flags);
3173 if (rc)
3174 goto nvram_write_end;
3176 cmd_flags = 0;
3177 }
3178 }
3180 /* Enable writes to flash interface (unlock write-protect) */
3181 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3182 goto nvram_write_end;
3184 /* Erase the page */
3185 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3186 goto nvram_write_end;
3188 /* Re-enable write access for the actual write */
3189 bnx2_enable_nvram_write(bp);
3191 /* Loop to write back the buffer data from page_start to
3192 * data_start */
3193 i = 0;
3194 if (bp->flash_info->buffered == 0) {
3195 for (addr = page_start; addr < data_start;
3196 addr += 4, i += 4) {
3198 rc = bnx2_nvram_write_dword(bp, addr,
3199 &flash_buffer[i], cmd_flags);
3201 if (rc)
3202 goto nvram_write_end;
3204 cmd_flags = 0;
3205 }
3206 }
3208 /* Loop to write the new data from data_start to data_end */
3209 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3210 if ((addr == page_end - 4) ||
3211 ((bp->flash_info->buffered) &&
3212 (addr == data_end - 4))) {
3214 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3215 }
3216 rc = bnx2_nvram_write_dword(bp, addr, buf,
3217 cmd_flags);
3219 if (rc)
3220 goto nvram_write_end;
3222 cmd_flags = 0;
3223 buf += 4;
3224 }
3226 /* Loop to write back the buffer data from data_end
3227 * to page_end */
3228 if (bp->flash_info->buffered == 0) {
3229 for (addr = data_end; addr < page_end;
3230 addr += 4, i += 4) {
3232 if (addr == page_end-4) {
3233 cmd_flags = BNX2_NVM_COMMAND_LAST;
3234 }
3235 rc = bnx2_nvram_write_dword(bp, addr,
3236 &flash_buffer[i], cmd_flags);
3238 if (rc)
3239 goto nvram_write_end;
3240 }
3241 }
3245 /* Disable writes to flash interface (lock write-protect) */
3246 bnx2_disable_nvram_write(bp);
3248 /* Disable access to flash interface */
3249 bnx2_disable_nvram_access(bp);
3250 bnx2_release_nvram_lock(bp);
3252 /* Account for the data written to this page */
3253 written += data_end - data_start;
3254 }
3256 nvram_write_end:
3257 if (bp->flash_info->buffered == 0)
3258 kfree(flash_buffer);
3260 if (align_start || align_end)
3261 kfree(buf);
3263 return rc;
3264 }
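/* Soft-reset sequence: quiesce DMA, wait for the bootcode's permission
 * via bnx2_fw_sync(), deposit the driver reset signature in shared
 * memory, then trigger CORE_RST_REQ and poll PCICFG_MISC_CONFIG until
 * the reset bits clear.
 */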
3265 static int
3266 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3267 {
3268 u32 val;
3269 int i, rc = 0;
3271 /* Wait for the current PCI transaction to complete before
3272 * issuing a reset. */
3273 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3274 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3276 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3277 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3278 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3279 udelay(5);
3281 /* Wait for the firmware to tell us it is ok to issue a reset. */
3282 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3284 /* Deposit a driver reset signature so the firmware knows that
3285 * this is a soft reset. */
3286 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3287 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3289 /* Do a dummy read to force the chip to complete all current
3290 * transactions before we issue a reset. */
3291 val = REG_RD(bp, BNX2_MISC_ID);
3293 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3294 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3295 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3297 /* Chip reset. */
3298 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3300 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3301 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3302 msleep(15);
3304 /* Reset takes approximately 30 usec */
3305 for (i = 0; i < 10; i++) {
3306 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3307 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3308 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3309 break;
3310 }
3311 udelay(10);
3312 }
3314 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3315 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3316 printk(KERN_ERR PFX "Chip reset did not complete\n");
3317 return -EBUSY;
3318 }
3320 /* Make sure byte swapping is properly configured. */
3321 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3322 if (val != 0x01020304) {
3323 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3324 return -ENODEV;
3325 }
3327 /* Wait for the firmware to finish its initialization. */
3328 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3329 if (rc)
3330 return rc;
3332 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3333 /* Adjust the voltage regulator two steps lower. The default
3334 * value of this register is 0x0000000e. */
3335 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3337 /* Remove bad rbuf memory from the free pool. */
3338 rc = bnx2_alloc_bad_rbuf(bp);
3339 }
3341 return rc;
3342 }
3344 static int
3345 bnx2_init_chip(struct bnx2 *bp)
3346 {
3347 u32 val;
3348 int rc;
3350 /* Make sure the interrupt is not active. */
3351 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3353 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3354 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3355 #ifdef __BIG_ENDIAN
3356 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3357 #endif
3358 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3359 DMA_READ_CHANS << 12 |
3360 DMA_WRITE_CHANS << 16;
3362 val |= (0x2 << 20) | (1 << 11);
3364 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3365 val |= (1 << 23);
3367 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3368 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3369 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3371 REG_WR(bp, BNX2_DMA_CONFIG, val);
3373 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3374 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3375 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3376 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3379 if (bp->flags & PCIX_FLAG) {
3382 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3384 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3385 val16 & ~PCI_X_CMD_ERO);
3388 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3389 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3390 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3391 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3393 /* Initialize context mapping and zero out the quick contexts. The
3394 * context block must have already been enabled. */
3395 bnx2_init_context(bp);
3397 if ((rc = bnx2_init_cpus(bp)) != 0)
3398 return rc;
3400 bnx2_init_nvram(bp);
3402 bnx2_set_mac_addr(bp);
3404 val = REG_RD(bp, BNX2_MQ_CONFIG);
3405 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3406 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3407 REG_WR(bp, BNX2_MQ_CONFIG, val);
3409 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3410 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3411 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3413 val = (BCM_PAGE_BITS - 8) << 24;
3414 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3416 /* Configure page size. */
3417 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3418 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3419 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3420 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3422 val = bp->mac_addr[0] +
3423 (bp->mac_addr[1] << 8) +
3424 (bp->mac_addr[2] << 16) +
3425 bp->mac_addr[3] +
3426 (bp->mac_addr[4] << 8) +
3427 (bp->mac_addr[5] << 16);
3428 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3430 /* Program the MTU. Also include 4 bytes for CRC32. */
3431 val = bp->dev->mtu + ETH_HLEN + 4;
3432 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3433 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3434 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3436 bp->last_status_idx = 0;
3437 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3439 /* Set up how to generate a link change interrupt. */
3440 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3442 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3443 (u64) bp->status_blk_mapping & 0xffffffff);
3444 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3446 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3447 (u64) bp->stats_blk_mapping & 0xffffffff);
3448 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3449 (u64) bp->stats_blk_mapping >> 32);
3451 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3452 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3454 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3455 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3457 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3458 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3460 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3462 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3464 REG_WR(bp, BNX2_HC_COM_TICKS,
3465 (bp->com_ticks_int << 16) | bp->com_ticks);
3467 REG_WR(bp, BNX2_HC_CMD_TICKS,
3468 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3470 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3471 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3473 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3474 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3475 else
3476 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3477 BNX2_HC_CONFIG_TX_TMR_MODE |
3478 BNX2_HC_CONFIG_COLLECT_STATS);
3481 /* Clear internal stats counters. */
3482 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3484 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3486 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3487 BNX2_PORT_FEATURE_ASF_ENABLED)
3488 bp->flags |= ASF_ENABLE_FLAG;
3490 /* Initialize the receive filter. */
3491 bnx2_set_rx_mode(bp->dev);
3493 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3494 0);
3496 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3497 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3501 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3503 return rc;
3504 }
3507 static void
3508 bnx2_init_tx_ring(struct bnx2 *bp)
3509 {
3510 struct tx_bd *txbd;
3511 u32 val;
3513 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3515 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3517 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3518 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3520 bp->tx_prod = 0;
3521 bp->tx_cons = 0;
3522 bp->hw_tx_cons = 0;
3523 bp->tx_prod_bseq = 0;
3525 val = BNX2_L2CTX_TYPE_TYPE_L2;
3526 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3527 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3529 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3531 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3533 val = (u64) bp->tx_desc_mapping >> 32;
3534 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3536 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3537 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3538 }
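/* The TX ring is one circular page of buffer descriptors: the extra
 * descriptor at index MAX_TX_DESC_CNT points back to the base of the
 * ring, and the same base address is written into the TX context
 * (TBDR_BHADDR_HI/LO) so the chip can follow the chain.
 */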
3540 static void
3541 bnx2_init_rx_ring(struct bnx2 *bp)
3542 {
3543 struct rx_bd *rxbd;
3544 int i;
3545 u16 prod, ring_prod;
3548 /* 8 for CRC and VLAN */
3549 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3550 /* 8 for alignment */
3551 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3553 ring_prod = prod = bp->rx_prod = 0;
3554 bp->rx_cons = 0;
3555 bp->hw_rx_cons = 0;
3556 bp->rx_prod_bseq = 0;
3558 for (i = 0; i < bp->rx_max_ring; i++) {
3559 int j;
3561 rxbd = &bp->rx_desc_ring[i][0];
3562 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3563 rxbd->rx_bd_len = bp->rx_buf_use_size;
3564 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3565 }
3566 if (i == (bp->rx_max_ring - 1))
3567 j = 0;
3568 else
3569 j = i + 1;
3570 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3571 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3572 0xffffffff;
3573 }
3575 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3576 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3578 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3580 val = (u64) bp->rx_desc_mapping[0] >> 32;
3581 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3583 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3584 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3586 for (i = 0; i < bp->rx_ring_size; i++) {
3587 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3588 break;
3589 }
3590 prod = NEXT_RX_BD(prod);
3591 ring_prod = RX_RING_IDX(prod);
3592 }
3593 bp->rx_prod = prod;
3595 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3597 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3598 }
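/* Convert a requested buffer count into a power-of-2 number of BD
 * pages.  Worked example, assuming MAX_RX_DESC_CNT = 255: a request
 * for 600 buffers spans 3 pages, which the rounding below turns into
 * rx_max_ring = 4.
 */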
3600 static void
3601 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3602 {
3603 u32 num_rings, max;
3605 bp->rx_ring_size = size;
3606 num_rings = 1;
3607 while (size > MAX_RX_DESC_CNT) {
3608 size -= MAX_RX_DESC_CNT;
3609 num_rings++;
3610 }
3611 /* round to next power of 2 */
3612 max = MAX_RX_RINGS;
3613 while ((max & num_rings) == 0)
3614 max >>= 1;
3616 if (num_rings != max)
3617 max <<= 1;
3619 bp->rx_max_ring = max;
3620 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3621 }
3624 bnx2_free_tx_skbs(struct bnx2 *bp)
3628 if (bp->tx_buf_ring == NULL)
3629 return;
3631 for (i = 0; i < TX_DESC_CNT; ) {
3632 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3633 struct sk_buff *skb = tx_buf->skb;
3641 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3642 skb_headlen(skb), PCI_DMA_TODEVICE);
3646 last = skb_shinfo(skb)->nr_frags;
3647 for (j = 0; j < last; j++) {
3648 tx_buf = &bp->tx_buf_ring[i + j + 1];
3649 pci_unmap_page(bp->pdev,
3650 pci_unmap_addr(tx_buf, mapping),
3651 skb_shinfo(skb)->frags[j].size,
3661 bnx2_free_rx_skbs(struct bnx2 *bp)
3665 if (bp->rx_buf_ring == NULL)
3666 return;
3668 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3669 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3670 struct sk_buff *skb = rx_buf->skb;
3675 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3676 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3685 bnx2_free_skbs(struct bnx2 *bp)
3687 bnx2_free_tx_skbs(bp);
3688 bnx2_free_rx_skbs(bp);
3692 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3696 rc = bnx2_reset_chip(bp, reset_code);
3701 if ((rc = bnx2_init_chip(bp)) != 0)
3704 bnx2_init_tx_ring(bp);
3705 bnx2_init_rx_ring(bp);
3710 bnx2_init_nic(struct bnx2 *bp)
3714 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3723 bnx2_test_registers(struct bnx2 *bp)
3727 static const struct {
3733 { 0x006c, 0, 0x00000000, 0x0000003f },
3734 { 0x0090, 0, 0xffffffff, 0x00000000 },
3735 { 0x0094, 0, 0x00000000, 0x00000000 },
3737 { 0x0404, 0, 0x00003f00, 0x00000000 },
3738 { 0x0418, 0, 0x00000000, 0xffffffff },
3739 { 0x041c, 0, 0x00000000, 0xffffffff },
3740 { 0x0420, 0, 0x00000000, 0x80ffffff },
3741 { 0x0424, 0, 0x00000000, 0x00000000 },
3742 { 0x0428, 0, 0x00000000, 0x00000001 },
3743 { 0x0450, 0, 0x00000000, 0x0000ffff },
3744 { 0x0454, 0, 0x00000000, 0xffffffff },
3745 { 0x0458, 0, 0x00000000, 0xffffffff },
3747 { 0x0808, 0, 0x00000000, 0xffffffff },
3748 { 0x0854, 0, 0x00000000, 0xffffffff },
3749 { 0x0868, 0, 0x00000000, 0x77777777 },
3750 { 0x086c, 0, 0x00000000, 0x77777777 },
3751 { 0x0870, 0, 0x00000000, 0x77777777 },
3752 { 0x0874, 0, 0x00000000, 0x77777777 },
3754 { 0x0c00, 0, 0x00000000, 0x00000001 },
3755 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3756 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3758 { 0x1000, 0, 0x00000000, 0x00000001 },
3759 { 0x1004, 0, 0x00000000, 0x000f0001 },
3761 { 0x1408, 0, 0x01c00800, 0x00000000 },
3762 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3763 { 0x14a8, 0, 0x00000000, 0x000001ff },
3764 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3765 { 0x14b0, 0, 0x00000002, 0x00000001 },
3766 { 0x14b8, 0, 0x00000000, 0x00000000 },
3767 { 0x14c0, 0, 0x00000000, 0x00000009 },
3768 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3769 { 0x14cc, 0, 0x00000000, 0x00000001 },
3770 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3772 { 0x1800, 0, 0x00000000, 0x00000001 },
3773 { 0x1804, 0, 0x00000000, 0x00000003 },
3775 { 0x2800, 0, 0x00000000, 0x00000001 },
3776 { 0x2804, 0, 0x00000000, 0x00003f01 },
3777 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3778 { 0x2810, 0, 0xffff0000, 0x00000000 },
3779 { 0x2814, 0, 0xffff0000, 0x00000000 },
3780 { 0x2818, 0, 0xffff0000, 0x00000000 },
3781 { 0x281c, 0, 0xffff0000, 0x00000000 },
3782 { 0x2834, 0, 0xffffffff, 0x00000000 },
3783 { 0x2840, 0, 0x00000000, 0xffffffff },
3784 { 0x2844, 0, 0x00000000, 0xffffffff },
3785 { 0x2848, 0, 0xffffffff, 0x00000000 },
3786 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3788 { 0x2c00, 0, 0x00000000, 0x00000011 },
3789 { 0x2c04, 0, 0x00000000, 0x00030007 },
3791 { 0x3c00, 0, 0x00000000, 0x00000001 },
3792 { 0x3c04, 0, 0x00000000, 0x00070000 },
3793 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3794 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3795 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3796 { 0x3c14, 0, 0x00000000, 0xffffffff },
3797 { 0x3c18, 0, 0x00000000, 0xffffffff },
3798 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3799 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3801 { 0x5004, 0, 0x00000000, 0x0000007f },
3802 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3803 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3805 { 0x5c00, 0, 0x00000000, 0x00000001 },
3806 { 0x5c04, 0, 0x00000000, 0x0003000f },
3807 { 0x5c08, 0, 0x00000003, 0x00000000 },
3808 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3809 { 0x5c10, 0, 0x00000000, 0xffffffff },
3810 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3811 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3812 { 0x5c88, 0, 0x00000000, 0x00077373 },
3813 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3815 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3816 { 0x680c, 0, 0xffffffff, 0x00000000 },
3817 { 0x6810, 0, 0xffffffff, 0x00000000 },
3818 { 0x6814, 0, 0xffffffff, 0x00000000 },
3819 { 0x6818, 0, 0xffffffff, 0x00000000 },
3820 { 0x681c, 0, 0xffffffff, 0x00000000 },
3821 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3822 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3823 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3824 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3825 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3826 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3827 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3828 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3829 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3830 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3831 { 0x684c, 0, 0xffffffff, 0x00000000 },
3832 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3833 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3834 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3835 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3836 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3837 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3839 { 0xffff, 0, 0x00000000, 0x00000000 },
3840 };
3842 ret = 0;
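/* Each reg_tbl entry carries a read/write mask and a read-only mask:
 * bits under rw_mask must read back exactly as written (0, then all
 * ones), while bits under ro_mask must keep their saved value.  An
 * offset of 0xffff terminates the table.
 */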
3843 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3844 u32 offset, rw_mask, ro_mask, save_val, val;
3846 offset = (u32) reg_tbl[i].offset;
3847 rw_mask = reg_tbl[i].rw_mask;
3848 ro_mask = reg_tbl[i].ro_mask;
3850 save_val = readl(bp->regview + offset);
3852 writel(0, bp->regview + offset);
3854 val = readl(bp->regview + offset);
3855 if ((val & rw_mask) != 0) {
3859 if ((val & ro_mask) != (save_val & ro_mask)) {
3863 writel(0xffffffff, bp->regview + offset);
3865 val = readl(bp->regview + offset);
3866 if ((val & rw_mask) != rw_mask) {
3870 if ((val & ro_mask) != (save_val & ro_mask)) {
3874 writel(save_val, bp->regview + offset);
3878 writel(save_val, bp->regview + offset);
3886 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3888 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3889 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3892 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3895 for (offset = 0; offset < size; offset += 4) {
3897 REG_WR_IND(bp, start + offset, test_pattern[i]);
3899 if (REG_RD_IND(bp, start + offset) !=
3909 bnx2_test_memory(struct bnx2 *bp)
3913 static const struct {
3917 { 0x60000, 0x4000 },
3918 { 0xa0000, 0x3000 },
3919 { 0xe0000, 0x4000 },
3920 { 0x120000, 0x4000 },
3921 { 0x1a0000, 0x4000 },
3922 { 0x160000, 0x4000 },
3923 { 0xffffffff, 0 },
3924 };
3926 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3927 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3928 mem_tbl[i].len)) != 0) {
3929 return ret;
3930 }
3931 }
3933 return ret;
3934 }
3936 #define BNX2_MAC_LOOPBACK 0
3937 #define BNX2_PHY_LOOPBACK 1
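/* Loopback test outline: with the MAC or PHY looped back, one
 * self-addressed test frame is placed on the TX ring, coalescing is
 * forced with COAL_NOW_WO_INT, and the test passes only if exactly one
 * frame returns on the RX ring with no l2_fhdr error bits and an
 * intact payload.
 */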
3940 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3942 unsigned int pkt_size, num_pkts, i;
3943 struct sk_buff *skb, *rx_skb;
3944 unsigned char *packet;
3945 u16 rx_start_idx, rx_idx;
3948 struct sw_bd *rx_buf;
3949 struct l2_fhdr *rx_hdr;
3952 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3953 bp->loopback = MAC_LOOPBACK;
3954 bnx2_set_mac_loopback(bp);
3956 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3958 bnx2_set_phy_loopback(bp);
3964 skb = dev_alloc_skb(pkt_size);
3967 packet = skb_put(skb, pkt_size);
3968 memcpy(packet, bp->mac_addr, 6);
3969 memset(packet + 6, 0x0, 8);
3970 for (i = 14; i < pkt_size; i++)
3971 packet[i] = (unsigned char) (i & 0xff);
3973 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3974 PCI_DMA_TODEVICE);
3976 REG_WR(bp, BNX2_HC_COMMAND,
3977 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3979 REG_RD(bp, BNX2_HC_COMMAND);
3982 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3986 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3988 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3989 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3990 txbd->tx_bd_mss_nbytes = pkt_size;
3991 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3994 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3995 bp->tx_prod_bseq += pkt_size;
3997 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3998 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4002 REG_WR(bp, BNX2_HC_COMMAND,
4003 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4005 REG_RD(bp, BNX2_HC_COMMAND);
4009 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4012 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4013 goto loopback_test_done;
4014 }
4016 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4017 if (rx_idx != rx_start_idx + num_pkts) {
4018 goto loopback_test_done;
4019 }
4021 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4022 rx_skb = rx_buf->skb;
4024 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4025 skb_reserve(rx_skb, bp->rx_offset);
4027 pci_dma_sync_single_for_cpu(bp->pdev,
4028 pci_unmap_addr(rx_buf, mapping),
4029 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4031 if (rx_hdr->l2_fhdr_status &
4032 (L2_FHDR_ERRORS_BAD_CRC |
4033 L2_FHDR_ERRORS_PHY_DECODE |
4034 L2_FHDR_ERRORS_ALIGNMENT |
4035 L2_FHDR_ERRORS_TOO_SHORT |
4036 L2_FHDR_ERRORS_GIANT_FRAME)) {
4038 goto loopback_test_done;
4039 }
4041 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4042 goto loopback_test_done;
4043 }
4045 for (i = 14; i < pkt_size; i++) {
4046 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4047 goto loopback_test_done;
4048 }
4049 }
4051 ret = 0;
4053 loopback_test_done:
4054 bp->loopback = 0;
4055 return ret;
4056 }
4058 #define BNX2_MAC_LOOPBACK_FAILED 1
4059 #define BNX2_PHY_LOOPBACK_FAILED 2
4060 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4061 BNX2_PHY_LOOPBACK_FAILED)
4064 bnx2_test_loopback(struct bnx2 *bp)
4068 if (!netif_running(bp->dev))
4069 return BNX2_LOOPBACK_FAILED;
4071 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4072 spin_lock_bh(&bp->phy_lock);
4074 spin_unlock_bh(&bp->phy_lock);
4075 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4076 rc |= BNX2_MAC_LOOPBACK_FAILED;
4077 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4078 rc |= BNX2_PHY_LOOPBACK_FAILED;
4080 return rc;
4081 }
4082 #define NVRAM_SIZE 0x200
4083 #define CRC32_RESIDUAL 0xdebb20e3
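/* Standard CRC32 property: running the CRC over a block that ends with
 * its own little-endian CRC yields the fixed residual 0xdebb20e3
 * regardless of the data, so each 0x100-byte NVRAM section can be
 * verified without knowing where its checksum field sits.
 */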
4086 bnx2_test_nvram(struct bnx2 *bp)
4088 u32 buf[NVRAM_SIZE / 4];
4089 u8 *data = (u8 *) buf;
4093 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4094 goto test_nvram_done;
4096 magic = be32_to_cpu(buf[0]);
4097 if (magic != 0x669955aa) {
4099 goto test_nvram_done;
4102 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4103 goto test_nvram_done;
4105 csum = ether_crc_le(0x100, data);
4106 if (csum != CRC32_RESIDUAL) {
4108 goto test_nvram_done;
4111 csum = ether_crc_le(0x100, data + 0x100);
4112 if (csum != CRC32_RESIDUAL) {
4121 bnx2_test_link(struct bnx2 *bp)
4125 spin_lock_bh(&bp->phy_lock);
4126 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4127 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4128 spin_unlock_bh(&bp->phy_lock);
4130 if (bmsr & BMSR_LSTATUS) {
4137 bnx2_test_intr(struct bnx2 *bp)
4142 if (!netif_running(bp->dev))
4145 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4147 /* This register is not touched during run-time. */
4148 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4149 REG_RD(bp, BNX2_HC_COMMAND);
4151 for (i = 0; i < 10; i++) {
4152 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4158 msleep_interruptible(10);
4167 bnx2_timer(unsigned long data)
4169 struct bnx2 *bp = (struct bnx2 *) data;
4172 if (!netif_running(bp->dev))
4175 if (atomic_read(&bp->intr_sem) != 0)
4176 goto bnx2_restart_timer;
4178 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4179 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4181 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4183 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4184 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4186 spin_lock(&bp->phy_lock);
4187 if (bp->serdes_an_pending) {
4188 bp->serdes_an_pending--;
4190 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4193 bp->current_interval = bp->timer_interval;
4195 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4197 if (bmcr & BMCR_ANENABLE) {
4200 bnx2_write_phy(bp, 0x1c, 0x7c00);
4201 bnx2_read_phy(bp, 0x1c, &phy1);
4203 bnx2_write_phy(bp, 0x17, 0x0f01);
4204 bnx2_read_phy(bp, 0x15, &phy2);
4205 bnx2_write_phy(bp, 0x17, 0x0f01);
4206 bnx2_read_phy(bp, 0x15, &phy2);
4208 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4209 !(phy2 & 0x20)) { /* no CONFIG */
4211 bmcr &= ~BMCR_ANENABLE;
4212 bmcr |= BMCR_SPEED1000 |
4214 bnx2_write_phy(bp, MII_BMCR, bmcr);
4216 PHY_PARALLEL_DETECT_FLAG;
4220 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4221 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4224 bnx2_write_phy(bp, 0x17, 0x0f01);
4225 bnx2_read_phy(bp, 0x15, &phy2);
4229 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4230 bmcr |= BMCR_ANENABLE;
4231 bnx2_write_phy(bp, MII_BMCR, bmcr);
4233 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4238 bp->current_interval = bp->timer_interval;
4240 spin_unlock(&bp->phy_lock);
4244 mod_timer(&bp->timer, jiffies + bp->current_interval);
4247 /* Called with rtnl_lock */
4249 bnx2_open(struct net_device *dev)
4251 struct bnx2 *bp = netdev_priv(dev);
4254 bnx2_set_power_state(bp, PCI_D0);
4255 bnx2_disable_int(bp);
4257 rc = bnx2_alloc_mem(bp);
4258 if (rc)
4259 return rc;
4261 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4262 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4265 if (pci_enable_msi(bp->pdev) == 0) {
4266 bp->flags |= USING_MSI_FLAG;
4267 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4271 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4272 IRQF_SHARED, dev->name, dev);
4276 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4284 rc = bnx2_init_nic(bp);
4287 free_irq(bp->pdev->irq, dev);
4288 if (bp->flags & USING_MSI_FLAG) {
4289 pci_disable_msi(bp->pdev);
4290 bp->flags &= ~USING_MSI_FLAG;
4297 mod_timer(&bp->timer, jiffies + bp->current_interval);
4299 atomic_set(&bp->intr_sem, 0);
4301 bnx2_enable_int(bp);
4303 if (bp->flags & USING_MSI_FLAG) {
4304 /* Test MSI to make sure it is working.
4305 * If the MSI test fails, go back to INTx mode.
4306 */
4307 if (bnx2_test_intr(bp) != 0) {
4308 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4309 " using MSI, switching to INTx mode. Please"
4310 " report this failure to the PCI maintainer"
4311 " and include system chipset information.\n",
4314 bnx2_disable_int(bp);
4315 free_irq(bp->pdev->irq, dev);
4316 pci_disable_msi(bp->pdev);
4317 bp->flags &= ~USING_MSI_FLAG;
4319 rc = bnx2_init_nic(bp);
4322 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4323 IRQF_SHARED, dev->name, dev);
4328 del_timer_sync(&bp->timer);
4331 bnx2_enable_int(bp);
4334 if (bp->flags & USING_MSI_FLAG) {
4335 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4338 netif_start_queue(dev);
4344 bnx2_reset_task(void *data)
4346 struct bnx2 *bp = data;
4348 if (!netif_running(bp->dev))
4349 return;
4351 bp->in_reset_task = 1;
4352 bnx2_netif_stop(bp);
4356 atomic_set(&bp->intr_sem, 1);
4357 bnx2_netif_start(bp);
4358 bp->in_reset_task = 0;
4362 bnx2_tx_timeout(struct net_device *dev)
4364 struct bnx2 *bp = netdev_priv(dev);
4366 /* This allows the netif to be shut down gracefully before resetting */
4367 schedule_work(&bp->reset_task);
4368 }
4371 /* Called with rtnl_lock */
4373 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4375 struct bnx2 *bp = netdev_priv(dev);
4377 bnx2_netif_stop(bp);
4380 bnx2_set_rx_mode(dev);
4382 bnx2_netif_start(bp);
4385 /* Called with rtnl_lock */
4387 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4389 struct bnx2 *bp = netdev_priv(dev);
4391 bnx2_netif_stop(bp);
4394 bp->vlgrp->vlan_devices[vid] = NULL;
4395 bnx2_set_rx_mode(dev);
4397 bnx2_netif_start(bp);
4401 /* Called with netif_tx_lock.
4402 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4403 * netif_wake_queue().
4406 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4408 struct bnx2 *bp = netdev_priv(dev);
4411 struct sw_bd *tx_buf;
4412 u32 len, vlan_tag_flags, last_frag, mss;
4413 u16 prod, ring_prod;
4416 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4417 netif_stop_queue(dev);
4418 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4419 dev->name);
4421 return NETDEV_TX_BUSY;
4423 len = skb_headlen(skb);
4425 ring_prod = TX_RING_IDX(prod);
4428 if (skb->ip_summed == CHECKSUM_HW) {
4429 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4430 }
4432 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4434 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4437 if ((mss = skb_shinfo(skb)->gso_size) &&
4438 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4439 u32 tcp_opt_len, ip_tcp_len;
4441 if (skb_header_cloned(skb) &&
4442 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4444 return NETDEV_TX_OK;
4447 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4448 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4451 if (skb->h.th->doff > 5) {
4452 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4454 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4456 skb->nh.iph->check = 0;
4457 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4459 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4463 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4464 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4465 (tcp_opt_len >> 2)) << 8;
4474 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4476 tx_buf = &bp->tx_buf_ring[ring_prod];
4477 tx_buf->skb = skb;
4478 pci_unmap_addr_set(tx_buf, mapping, mapping);
4480 txbd = &bp->tx_desc_ring[ring_prod];
4482 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4483 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4484 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4485 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4487 last_frag = skb_shinfo(skb)->nr_frags;
4489 for (i = 0; i < last_frag; i++) {
4490 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4492 prod = NEXT_TX_BD(prod);
4493 ring_prod = TX_RING_IDX(prod);
4494 txbd = &bp->tx_desc_ring[ring_prod];
4497 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4498 len, PCI_DMA_TODEVICE);
4499 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4500 mapping, mapping);
4502 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4503 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4504 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4505 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4506 }
4508 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4510 prod = NEXT_TX_BD(prod);
4511 bp->tx_prod_bseq += skb->len;
4513 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4514 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4519 dev->trans_start = jiffies;
4521 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4522 netif_stop_queue(dev);
4523 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4524 netif_wake_queue(dev);
4527 return NETDEV_TX_OK;
4530 /* Called with rtnl_lock */
4532 bnx2_close(struct net_device *dev)
4534 struct bnx2 *bp = netdev_priv(dev);
4535 u32 reset_code;
4537 /* Calling flush_scheduled_work() may deadlock because
4538 * linkwatch_event() may be on the workqueue and it will try to get
4539 * the rtnl_lock which we are holding.
4541 while (bp->in_reset_task)
4544 bnx2_netif_stop(bp);
4545 del_timer_sync(&bp->timer);
4546 if (bp->flags & NO_WOL_FLAG)
4547 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4548 else if (bp->wol)
4549 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4550 else
4551 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4552 bnx2_reset_chip(bp, reset_code);
4553 free_irq(bp->pdev->irq, dev);
4554 if (bp->flags & USING_MSI_FLAG) {
4555 pci_disable_msi(bp->pdev);
4556 bp->flags &= ~USING_MSI_FLAG;
4561 netif_carrier_off(bp->dev);
4562 bnx2_set_power_state(bp, PCI_D3hot);
4566 #define GET_NET_STATS64(ctr) \
4567 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4568 (unsigned long) (ctr##_lo)
4570 #define GET_NET_STATS32(ctr) \
4571 (ctr##_lo)
4573 #if (BITS_PER_LONG == 64)
4574 #define GET_NET_STATS GET_NET_STATS64
4576 #define GET_NET_STATS GET_NET_STATS32
4577 #endif
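/* The chip keeps 64-bit counters as _hi/_lo register pairs.  On 64-bit
 * kernels GET_NET_STATS(stats_blk->stat_IfHCInOctets) expands to
 * ((unsigned long) stat_IfHCInOctets_hi << 32) + stat_IfHCInOctets_lo;
 * 32-bit kernels report only the low 32 bits.
 */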
4579 static struct net_device_stats *
4580 bnx2_get_stats(struct net_device *dev)
4582 struct bnx2 *bp = netdev_priv(dev);
4583 struct statistics_block *stats_blk = bp->stats_blk;
4584 struct net_device_stats *net_stats = &bp->net_stats;
4586 if (bp->stats_blk == NULL) {
4587 return net_stats;
4588 }
4589 net_stats->rx_packets =
4590 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4591 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4592 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4594 net_stats->tx_packets =
4595 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4596 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4597 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4599 net_stats->rx_bytes =
4600 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4602 net_stats->tx_bytes =
4603 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4605 net_stats->multicast =
4606 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4608 net_stats->collisions =
4609 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4611 net_stats->rx_length_errors =
4612 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4613 stats_blk->stat_EtherStatsOverrsizePkts);
4615 net_stats->rx_over_errors =
4616 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4618 net_stats->rx_frame_errors =
4619 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4621 net_stats->rx_crc_errors =
4622 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4624 net_stats->rx_errors = net_stats->rx_length_errors +
4625 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4626 net_stats->rx_crc_errors;
4628 net_stats->tx_aborted_errors =
4629 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4630 stats_blk->stat_Dot3StatsLateCollisions);
4632 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4633 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4634 net_stats->tx_carrier_errors = 0;
4636 net_stats->tx_carrier_errors =
4638 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4641 net_stats->tx_errors =
4643 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4645 net_stats->tx_aborted_errors +
4646 net_stats->tx_carrier_errors;
4648 net_stats->rx_missed_errors =
4649 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4650 stats_blk->stat_FwRxDrop);
4652 return net_stats;
4653 }
4655 /* All ethtool functions called with rtnl_lock */
4658 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4660 struct bnx2 *bp = netdev_priv(dev);
4662 cmd->supported = SUPPORTED_Autoneg;
4663 if (bp->phy_flags & PHY_SERDES_FLAG) {
4664 cmd->supported |= SUPPORTED_1000baseT_Full |
4667 cmd->port = PORT_FIBRE;
4670 cmd->supported |= SUPPORTED_10baseT_Half |
4671 SUPPORTED_10baseT_Full |
4672 SUPPORTED_100baseT_Half |
4673 SUPPORTED_100baseT_Full |
4674 SUPPORTED_1000baseT_Full |
4677 cmd->port = PORT_TP;
4680 cmd->advertising = bp->advertising;
4682 if (bp->autoneg & AUTONEG_SPEED) {
4683 cmd->autoneg = AUTONEG_ENABLE;
4684 }
4685 else {
4686 cmd->autoneg = AUTONEG_DISABLE;
4687 }
4689 if (netif_carrier_ok(dev)) {
4690 cmd->speed = bp->line_speed;
4691 cmd->duplex = bp->duplex;
4698 cmd->transceiver = XCVR_INTERNAL;
4699 cmd->phy_address = bp->phy_addr;
4705 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4707 struct bnx2 *bp = netdev_priv(dev);
4708 u8 autoneg = bp->autoneg;
4709 u8 req_duplex = bp->req_duplex;
4710 u16 req_line_speed = bp->req_line_speed;
4711 u32 advertising = bp->advertising;
4713 if (cmd->autoneg == AUTONEG_ENABLE) {
4714 autoneg |= AUTONEG_SPEED;
4716 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4718 /* allow advertising a single speed only */
4719 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4720 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4721 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4722 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4724 if (bp->phy_flags & PHY_SERDES_FLAG)
4725 return -EINVAL;
4727 advertising = cmd->advertising;
4730 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4731 advertising = cmd->advertising;
4733 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4737 if (bp->phy_flags & PHY_SERDES_FLAG) {
4738 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4741 advertising = ETHTOOL_ALL_COPPER_SPEED;
4744 advertising |= ADVERTISED_Autoneg;
4747 if (bp->phy_flags & PHY_SERDES_FLAG) {
4748 if ((cmd->speed != SPEED_1000) ||
4749 (cmd->duplex != DUPLEX_FULL)) {
4753 else if (cmd->speed == SPEED_1000) {
4756 autoneg &= ~AUTONEG_SPEED;
4757 req_line_speed = cmd->speed;
4758 req_duplex = cmd->duplex;
4762 bp->autoneg = autoneg;
4763 bp->advertising = advertising;
4764 bp->req_line_speed = req_line_speed;
4765 bp->req_duplex = req_duplex;
4767 spin_lock_bh(&bp->phy_lock);
4771 spin_unlock_bh(&bp->phy_lock);
4777 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4779 struct bnx2 *bp = netdev_priv(dev);
4781 strcpy(info->driver, DRV_MODULE_NAME);
4782 strcpy(info->version, DRV_MODULE_VERSION);
4783 strcpy(info->bus_info, pci_name(bp->pdev));
4784 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4785 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4786 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4787 info->fw_version[1] = info->fw_version[3] = '.';
4788 info->fw_version[5] = 0;
4791 #define BNX2_REGDUMP_LEN (32 * 1024)
4794 bnx2_get_regs_len(struct net_device *dev)
4796 return BNX2_REGDUMP_LEN;
4800 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4802 u32 *p = _p, i, offset;
4803 u8 *orig_p = _p;
4804 struct bnx2 *bp = netdev_priv(dev);
4805 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4806 0x0800, 0x0880, 0x0c00, 0x0c10,
4807 0x0c30, 0x0d08, 0x1000, 0x101c,
4808 0x1040, 0x1048, 0x1080, 0x10a4,
4809 0x1400, 0x1490, 0x1498, 0x14f0,
4810 0x1500, 0x155c, 0x1580, 0x15dc,
4811 0x1600, 0x1658, 0x1680, 0x16d8,
4812 0x1800, 0x1820, 0x1840, 0x1854,
4813 0x1880, 0x1894, 0x1900, 0x1984,
4814 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4815 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4816 0x2000, 0x2030, 0x23c0, 0x2400,
4817 0x2800, 0x2820, 0x2830, 0x2850,
4818 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4819 0x3c00, 0x3c94, 0x4000, 0x4010,
4820 0x4080, 0x4090, 0x43c0, 0x4458,
4821 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4822 0x4fc0, 0x5010, 0x53c0, 0x5444,
4823 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4824 0x5fc0, 0x6000, 0x6400, 0x6428,
4825 0x6800, 0x6848, 0x684c, 0x6860,
4826 0x6888, 0x6910, 0x8000 };
4830 memset(p, 0, BNX2_REGDUMP_LEN);
4832 if (!netif_running(bp->dev))
4836 offset = reg_boundaries[0];
4838 while (offset < BNX2_REGDUMP_LEN) {
4839 *p++ = REG_RD(bp, offset);
4841 if (offset == reg_boundaries[i + 1]) {
4842 offset = reg_boundaries[i + 2];
4843 p = (u32 *) (orig_p + offset);
4850 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4852 struct bnx2 *bp = netdev_priv(dev);
4854 if (bp->flags & NO_WOL_FLAG) {
4859 wol->supported = WAKE_MAGIC;
4861 wol->wolopts = WAKE_MAGIC;
4865 memset(&wol->sopass, 0, sizeof(wol->sopass));
4869 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4871 struct bnx2 *bp = netdev_priv(dev);
4873 if (wol->wolopts & ~WAKE_MAGIC)
4874 return -EINVAL;
4876 if (wol->wolopts & WAKE_MAGIC) {
4877 if (bp->flags & NO_WOL_FLAG)
4889 bnx2_nway_reset(struct net_device *dev)
4891 struct bnx2 *bp = netdev_priv(dev);
4894 if (!(bp->autoneg & AUTONEG_SPEED)) {
4895 return -EINVAL;
4896 }
4898 spin_lock_bh(&bp->phy_lock);
4900 /* Force a link down visible on the other side */
4901 if (bp->phy_flags & PHY_SERDES_FLAG) {
4902 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4903 spin_unlock_bh(&bp->phy_lock);
4907 spin_lock_bh(&bp->phy_lock);
4908 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4909 bp->current_interval = SERDES_AN_TIMEOUT;
4910 bp->serdes_an_pending = 1;
4911 mod_timer(&bp->timer, jiffies + bp->current_interval);
4915 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4916 bmcr &= ~BMCR_LOOPBACK;
4917 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4919 spin_unlock_bh(&bp->phy_lock);
4925 bnx2_get_eeprom_len(struct net_device *dev)
4927 struct bnx2 *bp = netdev_priv(dev);
4929 if (bp->flash_info == NULL)
4930 return 0;
4932 return (int) bp->flash_size;
4936 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4939 struct bnx2 *bp = netdev_priv(dev);
4942 /* parameters already validated in ethtool_get_eeprom */
4944 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4950 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4953 struct bnx2 *bp = netdev_priv(dev);
4956 /* parameters already validated in ethtool_set_eeprom */
4958 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4964 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4966 struct bnx2 *bp = netdev_priv(dev);
4968 memset(coal, 0, sizeof(struct ethtool_coalesce));
4970 coal->rx_coalesce_usecs = bp->rx_ticks;
4971 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4972 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4973 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4975 coal->tx_coalesce_usecs = bp->tx_ticks;
4976 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4977 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4978 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4980 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4986 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4988 struct bnx2 *bp = netdev_priv(dev);
4990 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4991 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4993 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4994 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4996 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4997 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4999 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5000 if (bp->rx_quick_cons_trip_int > 0xff)
5001 bp->rx_quick_cons_trip_int = 0xff;
5003 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5004 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5006 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5007 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5009 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5010 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5012 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5013 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5014 0xff;
5016 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5017 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5018 bp->stats_ticks &= 0xffff00;
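/* Masking stats_ticks with 0xffff00 keeps bits 8-23 only, effectively
 * rounding the statistics DMA interval down to a multiple of 256 usec;
 * the same masked value is programmed into BNX2_HC_STATS_TICKS in
 * bnx2_init_chip().
 */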
5020 if (netif_running(bp->dev)) {
5021 bnx2_netif_stop(bp);
5023 bnx2_netif_start(bp);
5030 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5032 struct bnx2 *bp = netdev_priv(dev);
5034 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5035 ering->rx_mini_max_pending = 0;
5036 ering->rx_jumbo_max_pending = 0;
5038 ering->rx_pending = bp->rx_ring_size;
5039 ering->rx_mini_pending = 0;
5040 ering->rx_jumbo_pending = 0;
5042 ering->tx_max_pending = MAX_TX_DESC_CNT;
5043 ering->tx_pending = bp->tx_ring_size;
5047 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5049 struct bnx2 *bp = netdev_priv(dev);
5051 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5052 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5053 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5054 return -EINVAL;
5055 }
5057 if (netif_running(bp->dev)) {
5058 bnx2_netif_stop(bp);
5059 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5064 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5065 bp->tx_ring_size = ering->tx_pending;
5067 if (netif_running(bp->dev)) {
5070 rc = bnx2_alloc_mem(bp);
5074 bnx2_netif_start(bp);
5081 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5083 struct bnx2 *bp = netdev_priv(dev);
5085 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5086 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5087 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5091 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5093 struct bnx2 *bp = netdev_priv(dev);
5095 bp->req_flow_ctrl = 0;
5096 if (epause->rx_pause)
5097 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5098 if (epause->tx_pause)
5099 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5101 if (epause->autoneg) {
5102 bp->autoneg |= AUTONEG_FLOW_CTRL;
5103 }
5104 else {
5105 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5106 }
5108 spin_lock_bh(&bp->phy_lock);
5112 spin_unlock_bh(&bp->phy_lock);
5118 bnx2_get_rx_csum(struct net_device *dev)
5120 struct bnx2 *bp = netdev_priv(dev);
5126 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5128 struct bnx2 *bp = netdev_priv(dev);
5135 bnx2_set_tso(struct net_device *dev, u32 data)
5138 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5140 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5144 #define BNX2_NUM_STATS 46
5147 char string[ETH_GSTRING_LEN];
5148 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5150 { "rx_error_bytes" },
5152 { "tx_error_bytes" },
5153 { "rx_ucast_packets" },
5154 { "rx_mcast_packets" },
5155 { "rx_bcast_packets" },
5156 { "tx_ucast_packets" },
5157 { "tx_mcast_packets" },
5158 { "tx_bcast_packets" },
5159 { "tx_mac_errors" },
5160 { "tx_carrier_errors" },
5161 { "rx_crc_errors" },
5162 { "rx_align_errors" },
5163 { "tx_single_collisions" },
5164 { "tx_multi_collisions" },
5166 { "tx_excess_collisions" },
5167 { "tx_late_collisions" },
5168 { "tx_total_collisions" },
5171 { "rx_undersize_packets" },
5172 { "rx_oversize_packets" },
5173 { "rx_64_byte_packets" },
5174 { "rx_65_to_127_byte_packets" },
5175 { "rx_128_to_255_byte_packets" },
5176 { "rx_256_to_511_byte_packets" },
5177 { "rx_512_to_1023_byte_packets" },
5178 { "rx_1024_to_1522_byte_packets" },
5179 { "rx_1523_to_9022_byte_packets" },
5180 { "tx_64_byte_packets" },
5181 { "tx_65_to_127_byte_packets" },
5182 { "tx_128_to_255_byte_packets" },
5183 { "tx_256_to_511_byte_packets" },
5184 { "tx_512_to_1023_byte_packets" },
5185 { "tx_1024_to_1522_byte_packets" },
5186 { "tx_1523_to_9022_byte_packets" },
5187 { "rx_xon_frames" },
5188 { "rx_xoff_frames" },
5189 { "tx_xon_frames" },
5190 { "tx_xoff_frames" },
5191 { "rx_mac_ctrl_frames" },
5192 { "rx_filtered_packets" },
5194 { "rx_fw_discards" },
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

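/* Example (hypothetical offset): a counter at byte offset 8 within
 * struct statistics_block yields STATS_OFFSET32(...) == 2, i.e. an
 * index into the stats block viewed as an array of u32 words, which
 * is how bnx2_get_ethtool_stats() reads it below.
 */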
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

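/* Per-counter widths in bytes for the stats above: 8 = 64-bit counter
 * read as a hi/lo pair of u32 words, 4 = single u32 word, 0 = counter
 * not usable on that chip and reported as zero (see the loop in
 * bnx2_get_ethtool_stats() below).
 */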
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

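/* Results land in buf[0..5] in the same order as bnx2_tests_str_arr:
 * the register, memory and loopback tests require the offline flag and
 * a chip reset; the NVRAM, interrupt and link tests run on a live NIC.
 */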
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

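/* The hardware statistics block is read as an array of u32 words.
 * 64-bit counters are exported by the chip as two consecutive words,
 * high word first, so they are reassembled as (hi << 32) + lo below.
 */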
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}

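/* ethtool -p handler: blink the port LED for 'data' seconds (0 is
 * treated as a 2-second default below), alternating between an all-off
 * and an all-on LED override every 500 ms, and restore the saved LED
 * mode when done or when interrupted by a signal.
 */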
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}

static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

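/* MII register access via SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG: PHY
 * reads and writes are serialized with the rest of the driver through
 * bp->phy_lock, and register writes require CAP_NET_ADMIN.
 */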
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}
	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address. First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
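	/* Example with hypothetical register values: MAC_UPPER 0x00001018
	 * and MAC_LOWER 0x73f9a4b2 unpack, high byte first, to the station
	 * address 10:18:73:f9:a4:b2.
	 */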
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 100);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

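/* PCI probe entry point: allocate the net_device, initialize board
 * state via bnx2_init_board(), wire up the net_device operations and
 * offload feature flags, then register with the network stack.
 */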
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;
	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}
	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

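/* Suspend/resume: the firmware reset code passed to bnx2_reset_chip()
 * below is chosen from the WOL configuration (no WOL support, WOL
 * armed, or WOL disabled); resume re-initializes the NIC and restarts
 * the interface.
 */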
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);