1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.8"
58 #define DRV_MODULE_RELDATE "April 24, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89 /* indexed by board_t, above */
92 } board_info[] __devinitdata = {
93 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94 { "HP NC370T Multifunction Gigabit Server Adapter" },
95 { "HP NC370i Multifunction Gigabit Server Adapter" },
96 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97 { "HP NC370F Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
100 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
123 static struct flash_spec flash_table[] =
126 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
127 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
128 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
130 /* Expansion entry 0001 */
131 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
132 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
133 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
135 /* Saifun SA25F010 (non-buffered flash) */
136 /* strap, cfg1, & write1 need updates */
137 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
138 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
139 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
140 "Non-buffered flash (128kB)"},
141 /* Saifun SA25F020 (non-buffered flash) */
142 /* strap, cfg1, & write1 need updates */
143 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
144 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
146 "Non-buffered flash (256kB)"},
147 /* Expansion entry 0100 */
148 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
149 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
152 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
153 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
154 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
155 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
156 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
157 /* Entry 0110: ST M45PE20 (non-buffered flash) */
158 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
159 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
161 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
162 /* Saifun SA25F005 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
164 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
165 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
167 "Non-buffered flash (64kB)"},
169 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
170 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
171 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
173 /* Expansion entry 1001 */
174 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
175 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 /* Expansion entry 1010 */
179 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
180 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183 /* ATMEL AT45DB011B (buffered flash) */
184 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
185 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
186 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
187 "Buffered flash (128kB)"},
188 /* Expansion entry 1100 */
189 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
190 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
193 /* Expansion entry 1101 */
194 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
195 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
198 /* Atmel Expansion entry 1110 */
199 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
200 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
201 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
202 "Entry 1110 (Atmel)"},
203 /* ATMEL AT45DB021B (buffered flash) */
204 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
205 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
207 "Buffered flash (256kB)"},
210 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
212 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
218 /* The ring uses 256 indices for 255 entries, one of them
219 * needs to be skipped.
220 */
221 diff = bp->tx_prod - bp->tx_cons;
222 if (unlikely(diff >= TX_DESC_CNT)) {
224 if (diff == TX_DESC_CNT)
225 diff = MAX_TX_DESC_CNT;
227 return (bp->tx_ring_size - diff);
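/*
 * Illustrative sketch (not driver code): the producer and consumer are
 * free-running 16-bit indices, so their difference, taken with unsigned
 * wrap, is the number of in-flight descriptors; one ring slot is always
 * kept unused so that prod == cons unambiguously means "empty".
 */
static inline u16 ring_in_flight_sketch(u16 prod, u16 cons)
{
	return (u16) (prod - cons);	/* wraps modulo 65536 */
}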
231 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
233 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
234 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
238 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
245 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
248 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
251 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254 for (i = 0; i < 5; i++) {
256 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
262 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263 REG_WR(bp, BNX2_CTX_DATA, val);
268 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
273 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
274 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
275 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
277 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
278 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
283 val1 = (bp->phy_addr << 21) | (reg << 16) |
284 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
285 BNX2_EMAC_MDIO_COMM_START_BUSY;
286 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
288 for (i = 0; i < 50; i++) {
291 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
292 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
295 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
296 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
302 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
311 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
312 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
315 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
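/*
 * Hedged sketch of the MDIO access pattern used above: compose a command
 * word, set the START_BUSY bit, then poll until the controller clears it.
 * mdio_rd()/mdio_wr() and the MDIO_* names are hypothetical stand-ins for
 * the REG_RD/REG_WR accesses on BNX2_EMAC_MDIO_COMM.
 */
static int mdio_busy_poll_sketch(u32 cmd)
{
	int i;

	mdio_wr(MDIO_COMM, cmd | MDIO_START_BUSY);
	for (i = 0; i < 50; i++) {
		udelay(10);
		if (!(mdio_rd(MDIO_COMM) & MDIO_START_BUSY))
			return 0;	/* transaction completed */
	}
	return -EBUSY;			/* PHY never released the bus */
}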
325 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
330 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
331 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
332 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
334 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
335 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
340 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
341 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
342 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
343 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
345 for (i = 0; i < 50; i++) {
348 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
349 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
355 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
360 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
364 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
365 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374 bnx2_disable_int(struct bnx2 *bp)
376 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
377 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
378 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
382 bnx2_enable_int(struct bnx2 *bp)
384 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
385 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
386 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
388 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
389 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
391 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
395 bnx2_disable_int_sync(struct bnx2 *bp)
397 atomic_inc(&bp->intr_sem);
398 bnx2_disable_int(bp);
399 synchronize_irq(bp->pdev->irq);
403 bnx2_netif_stop(struct bnx2 *bp)
405 bnx2_disable_int_sync(bp);
406 if (netif_running(bp->dev)) {
407 netif_poll_disable(bp->dev);
408 netif_tx_disable(bp->dev);
409 bp->dev->trans_start = jiffies; /* prevent tx timeout */
414 bnx2_netif_start(struct bnx2 *bp)
416 if (atomic_dec_and_test(&bp->intr_sem)) {
417 if (netif_running(bp->dev)) {
418 netif_wake_queue(bp->dev);
419 netif_poll_enable(bp->dev);
426 bnx2_free_mem(struct bnx2 *bp)
430 for (i = 0; i < bp->ctx_pages; i++) {
431 if (bp->ctx_blk[i]) {
432 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
434 bp->ctx_blk_mapping[i]);
435 bp->ctx_blk[i] = NULL;
438 if (bp->status_blk) {
439 pci_free_consistent(bp->pdev, bp->status_stats_size,
440 bp->status_blk, bp->status_blk_mapping);
441 bp->status_blk = NULL;
442 bp->stats_blk = NULL;
444 if (bp->tx_desc_ring) {
445 pci_free_consistent(bp->pdev,
446 sizeof(struct tx_bd) * TX_DESC_CNT,
447 bp->tx_desc_ring, bp->tx_desc_mapping);
448 bp->tx_desc_ring = NULL;
450 kfree(bp->tx_buf_ring);
451 bp->tx_buf_ring = NULL;
452 for (i = 0; i < bp->rx_max_ring; i++) {
453 if (bp->rx_desc_ring[i])
454 pci_free_consistent(bp->pdev,
455 sizeof(struct rx_bd) * RX_DESC_CNT,
457 bp->rx_desc_mapping[i]);
458 bp->rx_desc_ring[i] = NULL;
460 vfree(bp->rx_buf_ring);
461 bp->rx_buf_ring = NULL;
465 bnx2_alloc_mem(struct bnx2 *bp)
467 int i, status_blk_size;
469 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
471 if (bp->tx_buf_ring == NULL)
474 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
475 sizeof(struct tx_bd) *
477 &bp->tx_desc_mapping);
478 if (bp->tx_desc_ring == NULL)
481 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
483 if (bp->rx_buf_ring == NULL)
486 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
489 for (i = 0; i < bp->rx_max_ring; i++) {
490 bp->rx_desc_ring[i] =
491 pci_alloc_consistent(bp->pdev,
492 sizeof(struct rx_bd) * RX_DESC_CNT,
493 &bp->rx_desc_mapping[i]);
494 if (bp->rx_desc_ring[i] == NULL)
499 /* Combine status and statistics blocks into one allocation. */
500 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
501 bp->status_stats_size = status_blk_size +
502 sizeof(struct statistics_block);
504 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
505 &bp->status_blk_mapping);
506 if (bp->status_blk == NULL)
509 memset(bp->status_blk, 0, bp->status_stats_size);
511 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
514 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
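/*
 * Sketch of the combined allocation above: one DMA-coherent buffer holds
 * the status block, padded out to a cache line, with the statistics block
 * immediately after it.  CPU and device views use the same offset.
 * Names here are illustrative, not driver code.
 */
static void split_status_stats_sketch(void *base, dma_addr_t base_dma,
				      void **stats, dma_addr_t *stats_dma)
{
	size_t off = L1_CACHE_ALIGN(sizeof(struct status_block));

	*stats = (u8 *) base + off;	/* CPU view of the stats block */
	*stats_dma = base_dma + off;	/* device view, same offset */
}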
516 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
517 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
518 if (bp->ctx_pages == 0)
520 for (i = 0; i < bp->ctx_pages; i++) {
521 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
523 &bp->ctx_blk_mapping[i]);
524 if (bp->ctx_blk[i] == NULL)
536 bnx2_report_fw_link(struct bnx2 *bp)
538 u32 fw_link_status = 0;
543 switch (bp->line_speed) {
545 if (bp->duplex == DUPLEX_HALF)
546 fw_link_status = BNX2_LINK_STATUS_10HALF;
548 fw_link_status = BNX2_LINK_STATUS_10FULL;
551 if (bp->duplex == DUPLEX_HALF)
552 fw_link_status = BNX2_LINK_STATUS_100HALF;
554 fw_link_status = BNX2_LINK_STATUS_100FULL;
557 if (bp->duplex == DUPLEX_HALF)
558 fw_link_status = BNX2_LINK_STATUS_1000HALF;
560 fw_link_status = BNX2_LINK_STATUS_1000FULL;
563 if (bp->duplex == DUPLEX_HALF)
564 fw_link_status = BNX2_LINK_STATUS_2500HALF;
566 fw_link_status = BNX2_LINK_STATUS_2500FULL;
570 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
573 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
575 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
576 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
578 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
579 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
580 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
582 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
586 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
588 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
592 bnx2_report_link(struct bnx2 *bp)
595 netif_carrier_on(bp->dev);
596 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
598 printk("%d Mbps ", bp->line_speed);
600 if (bp->duplex == DUPLEX_FULL)
601 printk("full duplex");
603 printk("half duplex");
606 if (bp->flow_ctrl & FLOW_CTRL_RX) {
607 printk(", receive ");
608 if (bp->flow_ctrl & FLOW_CTRL_TX)
609 printk("& transmit ");
612 printk(", transmit ");
614 printk("flow control ON");
619 netif_carrier_off(bp->dev);
620 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
623 bnx2_report_fw_link(bp);
627 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
629 u32 local_adv, remote_adv;
632 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
633 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
635 if (bp->duplex == DUPLEX_FULL) {
636 bp->flow_ctrl = bp->req_flow_ctrl;
641 if (bp->duplex != DUPLEX_FULL) {
645 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
646 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
649 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
650 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
651 bp->flow_ctrl |= FLOW_CTRL_TX;
652 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
653 bp->flow_ctrl |= FLOW_CTRL_RX;
657 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
658 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
660 if (bp->phy_flags & PHY_SERDES_FLAG) {
661 u32 new_local_adv = 0;
662 u32 new_remote_adv = 0;
664 if (local_adv & ADVERTISE_1000XPAUSE)
665 new_local_adv |= ADVERTISE_PAUSE_CAP;
666 if (local_adv & ADVERTISE_1000XPSE_ASYM)
667 new_local_adv |= ADVERTISE_PAUSE_ASYM;
668 if (remote_adv & ADVERTISE_1000XPAUSE)
669 new_remote_adv |= ADVERTISE_PAUSE_CAP;
670 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
671 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
673 local_adv = new_local_adv;
674 remote_adv = new_remote_adv;
677 /* See Table 28B-3 of 802.3ab-1999 spec. */
678 if (local_adv & ADVERTISE_PAUSE_CAP) {
679 if(local_adv & ADVERTISE_PAUSE_ASYM) {
680 if (remote_adv & ADVERTISE_PAUSE_CAP) {
681 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
683 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
684 bp->flow_ctrl = FLOW_CTRL_RX;
688 if (remote_adv & ADVERTISE_PAUSE_CAP) {
689 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
693 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
694 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
695 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
697 bp->flow_ctrl = FLOW_CTRL_TX;
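/*
 * The resolution above follows Table 28B-3 of IEEE 802.3; written as a
 * pure function over the PAUSE and ASYM_PAUSE advertisement bits it
 * reads as below (sketch only, mirroring the branches above):
 */
static u32 resolve_pause_sketch(u32 local, u32 remote)
{
	if (local & ADVERTISE_PAUSE_CAP) {
		if (remote & ADVERTISE_PAUSE_CAP)
			return FLOW_CTRL_TX | FLOW_CTRL_RX;
		if ((local & ADVERTISE_PAUSE_ASYM) &&
		    (remote & ADVERTISE_PAUSE_ASYM))
			return FLOW_CTRL_RX;	/* we receive, partner sends */
	} else if ((local & ADVERTISE_PAUSE_ASYM) &&
		   (remote & ADVERTISE_PAUSE_CAP) &&
		   (remote & ADVERTISE_PAUSE_ASYM)) {
		return FLOW_CTRL_TX;		/* we send, partner receives */
	}
	return 0;				/* no PAUSE in either direction */
}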
703 bnx2_5708s_linkup(struct bnx2 *bp)
708 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710 case BCM5708S_1000X_STAT1_SPEED_10:
711 bp->line_speed = SPEED_10;
713 case BCM5708S_1000X_STAT1_SPEED_100:
714 bp->line_speed = SPEED_100;
716 case BCM5708S_1000X_STAT1_SPEED_1G:
717 bp->line_speed = SPEED_1000;
719 case BCM5708S_1000X_STAT1_SPEED_2G5:
720 bp->line_speed = SPEED_2500;
723 if (val & BCM5708S_1000X_STAT1_FD)
724 bp->duplex = DUPLEX_FULL;
726 bp->duplex = DUPLEX_HALF;
732 bnx2_5706s_linkup(struct bnx2 *bp)
734 u32 bmcr, local_adv, remote_adv, common;
737 bp->line_speed = SPEED_1000;
739 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
740 if (bmcr & BMCR_FULLDPLX) {
741 bp->duplex = DUPLEX_FULL;
744 bp->duplex = DUPLEX_HALF;
747 if (!(bmcr & BMCR_ANENABLE)) {
751 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
752 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
754 common = local_adv & remote_adv;
755 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
757 if (common & ADVERTISE_1000XFULL) {
758 bp->duplex = DUPLEX_FULL;
761 bp->duplex = DUPLEX_HALF;
769 bnx2_copper_linkup(struct bnx2 *bp)
773 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
774 if (bmcr & BMCR_ANENABLE) {
775 u32 local_adv, remote_adv, common;
777 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
778 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
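/* Hedged note: MII_STAT1000 reports the link partner's 1000BASE-T
 * abilities two bit positions above where MII_CTRL1000 advertises ours
 * (LPA_1000FULL is bit 11, ADVERTISE_1000FULL is bit 9), so shifting
 * the partner's word right by 2 lines the two masks up for the AND
 * on the next line.
 */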
780 common = local_adv & (remote_adv >> 2);
781 if (common & ADVERTISE_1000FULL) {
782 bp->line_speed = SPEED_1000;
783 bp->duplex = DUPLEX_FULL;
785 else if (common & ADVERTISE_1000HALF) {
786 bp->line_speed = SPEED_1000;
787 bp->duplex = DUPLEX_HALF;
790 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
791 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
793 common = local_adv & remote_adv;
794 if (common & ADVERTISE_100FULL) {
795 bp->line_speed = SPEED_100;
796 bp->duplex = DUPLEX_FULL;
798 else if (common & ADVERTISE_100HALF) {
799 bp->line_speed = SPEED_100;
800 bp->duplex = DUPLEX_HALF;
802 else if (common & ADVERTISE_10FULL) {
803 bp->line_speed = SPEED_10;
804 bp->duplex = DUPLEX_FULL;
806 else if (common & ADVERTISE_10HALF) {
807 bp->line_speed = SPEED_10;
808 bp->duplex = DUPLEX_HALF;
817 if (bmcr & BMCR_SPEED100) {
818 bp->line_speed = SPEED_100;
821 bp->line_speed = SPEED_10;
823 if (bmcr & BMCR_FULLDPLX) {
824 bp->duplex = DUPLEX_FULL;
827 bp->duplex = DUPLEX_HALF;
835 bnx2_set_mac_link(struct bnx2 *bp)
839 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
840 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
841 (bp->duplex == DUPLEX_HALF)) {
842 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
845 /* Configure the EMAC mode register. */
846 val = REG_RD(bp, BNX2_EMAC_MODE);
848 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
849 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
850 BNX2_EMAC_MODE_25G_MODE);
853 switch (bp->line_speed) {
855 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
856 val |= BNX2_EMAC_MODE_PORT_MII_10M;
861 val |= BNX2_EMAC_MODE_PORT_MII;
864 val |= BNX2_EMAC_MODE_25G_MODE;
867 val |= BNX2_EMAC_MODE_PORT_GMII;
872 val |= BNX2_EMAC_MODE_PORT_GMII;
875 /* Set the MAC to operate in the appropriate duplex mode. */
876 if (bp->duplex == DUPLEX_HALF)
877 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
878 REG_WR(bp, BNX2_EMAC_MODE, val);
880 /* Enable/disable rx PAUSE. */
881 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
883 if (bp->flow_ctrl & FLOW_CTRL_RX)
884 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
885 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
887 /* Enable/disable tx PAUSE. */
888 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
889 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
891 if (bp->flow_ctrl & FLOW_CTRL_TX)
892 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
893 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
895 /* Acknowledge the interrupt. */
896 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
902 bnx2_test_and_enable_2g5(struct bnx2 *bp)
907 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
910 if (bp->autoneg & AUTONEG_SPEED)
911 bp->advertising |= ADVERTISED_2500baseX_Full;
913 bnx2_read_phy(bp, bp->mii_up1, &up1);
914 if (!(up1 & BCM5708S_UP1_2G5)) {
915 up1 |= BCM5708S_UP1_2G5;
916 bnx2_write_phy(bp, bp->mii_up1, up1);
924 bnx2_test_and_disable_2g5(struct bnx2 *bp)
929 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
932 bnx2_read_phy(bp, bp->mii_up1, &up1);
933 if (up1 & BCM5708S_UP1_2G5) {
934 up1 &= ~BCM5708S_UP1_2G5;
935 bnx2_write_phy(bp, bp->mii_up1, up1);
943 bnx2_enable_forced_2g5(struct bnx2 *bp)
947 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
950 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
951 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
952 bmcr |= BCM5708S_BMCR_FORCE_2500;
955 if (bp->autoneg & AUTONEG_SPEED) {
956 bmcr &= ~BMCR_ANENABLE;
957 if (bp->req_duplex == DUPLEX_FULL)
958 bmcr |= BMCR_FULLDPLX;
960 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
964 bnx2_disable_forced_2g5(struct bnx2 *bp)
968 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
971 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
972 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
973 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
976 if (bp->autoneg & AUTONEG_SPEED)
977 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
978 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
982 bnx2_set_link(struct bnx2 *bp)
987 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
992 link_up = bp->link_up;
994 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
995 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
997 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
998 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1001 val = REG_RD(bp, BNX2_EMAC_STATUS);
1002 if (val & BNX2_EMAC_STATUS_LINK)
1003 bmsr |= BMSR_LSTATUS;
1005 bmsr &= ~BMSR_LSTATUS;
1008 if (bmsr & BMSR_LSTATUS) {
1011 if (bp->phy_flags & PHY_SERDES_FLAG) {
1012 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1013 bnx2_5706s_linkup(bp);
1014 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1015 bnx2_5708s_linkup(bp);
1018 bnx2_copper_linkup(bp);
1020 bnx2_resolve_flow_ctrl(bp);
1023 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1024 (bp->autoneg & AUTONEG_SPEED))
1025 bnx2_disable_forced_2g5(bp);
1027 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1031 if (bp->link_up != link_up) {
1032 bnx2_report_link(bp);
1035 bnx2_set_mac_link(bp);
1041 bnx2_reset_phy(struct bnx2 *bp)
1046 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1048 #define PHY_RESET_MAX_WAIT 100
1049 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1052 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1053 if (!(reg & BMCR_RESET)) {
1058 if (i == PHY_RESET_MAX_WAIT) {
1065 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1069 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1070 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1072 if (bp->phy_flags & PHY_SERDES_FLAG) {
1073 adv = ADVERTISE_1000XPAUSE;
1076 adv = ADVERTISE_PAUSE_CAP;
1079 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1080 if (bp->phy_flags & PHY_SERDES_FLAG) {
1081 adv = ADVERTISE_1000XPSE_ASYM;
1084 adv = ADVERTISE_PAUSE_ASYM;
1087 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1088 if (bp->phy_flags & PHY_SERDES_FLAG) {
1089 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1092 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1099 bnx2_setup_serdes_phy(struct bnx2 *bp)
1104 if (!(bp->autoneg & AUTONEG_SPEED)) {
1106 int force_link_down = 0;
1108 if (bp->req_line_speed == SPEED_2500) {
1109 if (!bnx2_test_and_enable_2g5(bp))
1110 force_link_down = 1;
1111 } else if (bp->req_line_speed == SPEED_1000) {
1112 if (bnx2_test_and_disable_2g5(bp))
1113 force_link_down = 1;
1115 bnx2_read_phy(bp, bp->mii_adv, &adv);
1116 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1118 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1119 new_bmcr = bmcr & ~BMCR_ANENABLE;
1120 new_bmcr |= BMCR_SPEED1000;
1122 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1123 if (bp->req_line_speed == SPEED_2500)
1124 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1126 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1129 if (bp->req_duplex == DUPLEX_FULL) {
1130 adv |= ADVERTISE_1000XFULL;
1131 new_bmcr |= BMCR_FULLDPLX;
1134 adv |= ADVERTISE_1000XHALF;
1135 new_bmcr &= ~BMCR_FULLDPLX;
1137 if ((new_bmcr != bmcr) || (force_link_down)) {
1138 /* Force a link down visible on the other side */
1140 bnx2_write_phy(bp, bp->mii_adv, adv &
1141 ~(ADVERTISE_1000XFULL |
1142 ADVERTISE_1000XHALF));
1143 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1144 BMCR_ANRESTART | BMCR_ANENABLE);
1147 netif_carrier_off(bp->dev);
1148 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1149 bnx2_report_link(bp);
1151 bnx2_write_phy(bp, bp->mii_adv, adv);
1152 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1154 bnx2_resolve_flow_ctrl(bp);
1155 bnx2_set_mac_link(bp);
1160 bnx2_test_and_enable_2g5(bp);
1162 if (bp->advertising & ADVERTISED_1000baseT_Full)
1163 new_adv |= ADVERTISE_1000XFULL;
1165 new_adv |= bnx2_phy_get_pause_adv(bp);
1167 bnx2_read_phy(bp, bp->mii_adv, &adv);
1168 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170 bp->serdes_an_pending = 0;
1171 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1172 /* Force a link down visible on the other side */
1174 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1175 spin_unlock_bh(&bp->phy_lock);
1177 spin_lock_bh(&bp->phy_lock);
1180 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1181 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1183 /* Speed up link-up time when the link partner
1184 * does not autonegotiate, which is very common
1185 * in blade servers. Some blade servers use
1186 * IPMI for keyboard input and it's important
1187 * to minimize link disruptions. Autoneg. involves
1188 * exchanging base pages plus 3 next pages and
1189 * normally completes in about 120 msec.
1190 */
1191 bp->current_interval = SERDES_AN_TIMEOUT;
1192 bp->serdes_an_pending = 1;
1193 mod_timer(&bp->timer, jiffies + bp->current_interval);
1195 bnx2_resolve_flow_ctrl(bp);
1196 bnx2_set_mac_link(bp);
1202 #define ETHTOOL_ALL_FIBRE_SPEED \
1203 (ADVERTISED_1000baseT_Full)
1205 #define ETHTOOL_ALL_COPPER_SPEED \
1206 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1207 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1208 ADVERTISED_1000baseT_Full)
1210 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1211 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1213 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1216 bnx2_setup_copper_phy(struct bnx2 *bp)
1221 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1223 if (bp->autoneg & AUTONEG_SPEED) {
1224 u32 adv_reg, adv1000_reg;
1225 u32 new_adv_reg = 0;
1226 u32 new_adv1000_reg = 0;
1228 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1229 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1230 ADVERTISE_PAUSE_ASYM);
1232 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1233 adv1000_reg &= PHY_ALL_1000_SPEED;
1235 if (bp->advertising & ADVERTISED_10baseT_Half)
1236 new_adv_reg |= ADVERTISE_10HALF;
1237 if (bp->advertising & ADVERTISED_10baseT_Full)
1238 new_adv_reg |= ADVERTISE_10FULL;
1239 if (bp->advertising & ADVERTISED_100baseT_Half)
1240 new_adv_reg |= ADVERTISE_100HALF;
1241 if (bp->advertising & ADVERTISED_100baseT_Full)
1242 new_adv_reg |= ADVERTISE_100FULL;
1243 if (bp->advertising & ADVERTISED_1000baseT_Full)
1244 new_adv1000_reg |= ADVERTISE_1000FULL;
1246 new_adv_reg |= ADVERTISE_CSMA;
1248 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1250 if ((adv1000_reg != new_adv1000_reg) ||
1251 (adv_reg != new_adv_reg) ||
1252 ((bmcr & BMCR_ANENABLE) == 0)) {
1254 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1255 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1256 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1259 else if (bp->link_up) {
1260 /* Flow ctrl may have changed from auto to forced */
1261 /* or vice-versa. */
1263 bnx2_resolve_flow_ctrl(bp);
1264 bnx2_set_mac_link(bp);
1270 if (bp->req_line_speed == SPEED_100) {
1271 new_bmcr |= BMCR_SPEED100;
1273 if (bp->req_duplex == DUPLEX_FULL) {
1274 new_bmcr |= BMCR_FULLDPLX;
1276 if (new_bmcr != bmcr) {
1279 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1280 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1282 if (bmsr & BMSR_LSTATUS) {
1283 /* Force link down */
1284 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1285 spin_unlock_bh(&bp->phy_lock);
1287 spin_lock_bh(&bp->phy_lock);
1289 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1290 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1293 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1295 /* Normally, the new speed is set up after the link has
1296 * gone down and up again. In some cases, link will not go
1297 * down so we need to set up the new speed here.
1298 */
1299 if (bmsr & BMSR_LSTATUS) {
1300 bp->line_speed = bp->req_line_speed;
1301 bp->duplex = bp->req_duplex;
1302 bnx2_resolve_flow_ctrl(bp);
1303 bnx2_set_mac_link(bp);
1310 bnx2_setup_phy(struct bnx2 *bp)
1312 if (bp->loopback == MAC_LOOPBACK)
1315 if (bp->phy_flags & PHY_SERDES_FLAG) {
1316 return (bnx2_setup_serdes_phy(bp));
1319 return (bnx2_setup_copper_phy(bp));
1324 bnx2_init_5708s_phy(struct bnx2 *bp)
1328 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1329 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1330 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1332 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1333 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1334 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1336 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1337 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1338 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1340 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1341 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1342 val |= BCM5708S_UP1_2G5;
1343 bnx2_write_phy(bp, BCM5708S_UP1, val);
1346 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1347 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1348 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1349 /* increase tx signal amplitude */
1350 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1351 BCM5708S_BLK_ADDR_TX_MISC);
1352 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1353 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1354 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1355 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1358 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1359 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1364 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1365 BNX2_SHARED_HW_CFG_CONFIG);
1366 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1367 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1368 BCM5708S_BLK_ADDR_TX_MISC);
1369 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1370 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1371 BCM5708S_BLK_ADDR_DIG);
1378 bnx2_init_5706s_phy(struct bnx2 *bp)
1380 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1382 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1383 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1385 if (bp->dev->mtu > 1500) {
1388 /* Set extended packet length bit */
1389 bnx2_write_phy(bp, 0x18, 0x7);
1390 bnx2_read_phy(bp, 0x18, &val);
1391 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1393 bnx2_write_phy(bp, 0x1c, 0x6c00);
1394 bnx2_read_phy(bp, 0x1c, &val);
1395 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1400 bnx2_write_phy(bp, 0x18, 0x7);
1401 bnx2_read_phy(bp, 0x18, &val);
1402 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1404 bnx2_write_phy(bp, 0x1c, 0x6c00);
1405 bnx2_read_phy(bp, 0x1c, &val);
1406 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1413 bnx2_init_copper_phy(struct bnx2 *bp)
1417 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1418 bnx2_write_phy(bp, 0x18, 0x0c00);
1419 bnx2_write_phy(bp, 0x17, 0x000a);
1420 bnx2_write_phy(bp, 0x15, 0x310b);
1421 bnx2_write_phy(bp, 0x17, 0x201f);
1422 bnx2_write_phy(bp, 0x15, 0x9506);
1423 bnx2_write_phy(bp, 0x17, 0x401f);
1424 bnx2_write_phy(bp, 0x15, 0x14e2);
1425 bnx2_write_phy(bp, 0x18, 0x0400);
1428 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1429 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1430 MII_BNX2_DSP_EXPAND_REG | 0x8);
1431 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1433 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1436 if (bp->dev->mtu > 1500) {
1437 /* Set extended packet length bit */
1438 bnx2_write_phy(bp, 0x18, 0x7);
1439 bnx2_read_phy(bp, 0x18, &val);
1440 bnx2_write_phy(bp, 0x18, val | 0x4000);
1442 bnx2_read_phy(bp, 0x10, &val);
1443 bnx2_write_phy(bp, 0x10, val | 0x1);
1446 bnx2_write_phy(bp, 0x18, 0x7);
1447 bnx2_read_phy(bp, 0x18, &val);
1448 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1450 bnx2_read_phy(bp, 0x10, &val);
1451 bnx2_write_phy(bp, 0x10, val & ~0x1);
1454 /* ethernet@wirespeed */
1455 bnx2_write_phy(bp, 0x18, 0x7007);
1456 bnx2_read_phy(bp, 0x18, &val);
1457 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1463 bnx2_init_phy(struct bnx2 *bp)
1468 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1469 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1471 bp->mii_bmcr = MII_BMCR;
1472 bp->mii_bmsr = MII_BMSR;
1473 bp->mii_adv = MII_ADVERTISE;
1474 bp->mii_lpa = MII_LPA;
1476 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1480 bnx2_read_phy(bp, MII_PHYSID1, &val);
1481 bp->phy_id = val << 16;
1482 bnx2_read_phy(bp, MII_PHYSID2, &val);
1483 bp->phy_id |= val & 0xffff;
1485 if (bp->phy_flags & PHY_SERDES_FLAG) {
1486 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1487 rc = bnx2_init_5706s_phy(bp);
1488 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1489 rc = bnx2_init_5708s_phy(bp);
1492 rc = bnx2_init_copper_phy(bp);
1501 bnx2_set_mac_loopback(struct bnx2 *bp)
1505 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1506 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1507 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1508 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1513 static int bnx2_test_link(struct bnx2 *);
1516 bnx2_set_phy_loopback(struct bnx2 *bp)
1521 spin_lock_bh(&bp->phy_lock);
1522 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1524 spin_unlock_bh(&bp->phy_lock);
1528 for (i = 0; i < 10; i++) {
1529 if (bnx2_test_link(bp) == 0)
1534 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1535 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1536 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1537 BNX2_EMAC_MODE_25G_MODE);
1539 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1540 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1546 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1552 msg_data |= bp->fw_wr_seq;
1554 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1556 /* wait for an acknowledgement. */
1557 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1560 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1562 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1565 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1568 /* If we timed out, inform the firmware that this is the case. */
1569 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1571 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1574 msg_data &= ~BNX2_DRV_MSG_CODE;
1575 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1577 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1582 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
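/*
 * Sketch of the mailbox handshake above (mb_write()/mb_read() and the
 * *_MB / SEQ_MASK names are hypothetical stand-ins): the driver embeds a
 * sequence number in each request and the firmware echoes it back in its
 * ACK register, so a stale acknowledgement can never satisfy a new
 * request.
 */
static int fw_handshake_sketch(u32 seq, u32 msg)
{
	int i;

	mb_write(DRV_MB, msg | seq);
	for (i = 0; i < 100; i++) {		/* ~1 s total */
		msleep(10);
		if ((mb_read(FW_MB) & SEQ_MASK) == seq)
			return 0;		/* firmware saw this request */
	}
	return -ETIMEDOUT;
}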
1589 bnx2_init_5709_context(struct bnx2 *bp)
1594 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1595 val |= (BCM_PAGE_BITS - 8) << 16;
1596 REG_WR(bp, BNX2_CTX_COMMAND, val);
1597 for (i = 0; i < bp->ctx_pages; i++) {
1600 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1601 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1602 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1603 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1604 (u64) bp->ctx_blk_mapping[i] >> 32);
1605 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1606 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1607 for (j = 0; j < 10; j++) {
1609 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1610 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1614 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1623 bnx2_init_context(struct bnx2 *bp)
1629 u32 vcid_addr, pcid_addr, offset;
1633 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1636 vcid_addr = GET_PCID_ADDR(vcid);
1638 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1643 pcid_addr = GET_PCID_ADDR(new_vcid);
1646 vcid_addr = GET_CID_ADDR(vcid);
1647 pcid_addr = vcid_addr;
1650 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1651 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1653 /* Zero out the context. */
1654 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1655 CTX_WR(bp, 0x00, offset, 0);
1658 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1659 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1664 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1670 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1671 if (good_mbuf == NULL) {
1672 printk(KERN_ERR PFX "Failed to allocate memory in "
1673 "bnx2_alloc_bad_rbuf\n");
1677 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1678 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1682 /* Allocate a bunch of mbufs and save the good ones in an array. */
1683 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1684 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1685 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1687 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1689 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1691 /* The addresses with Bit 9 set are bad memory blocks. */
1692 if (!(val & (1 << 9))) {
1693 good_mbuf[good_mbuf_cnt] = (u16) val;
1697 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1700 /* Free the good ones back to the mbuf pool, thus discarding
1701 * all the bad ones. */
1702 while (good_mbuf_cnt) {
1705 val = good_mbuf[good_mbuf_cnt];
1706 val = (val << 9) | val | 1;
1708 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
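/*
 * Sketch of the filter applied above: firmware-allocated buffer ids with
 * bit 9 set correspond to known-bad internal RAM blocks, so only ids
 * with bit 9 clear are kept and later returned to the pool.
 */
static int rbuf_id_is_good_sketch(u32 id)
{
	return !(id & (1 << 9));
}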
1715 bnx2_set_mac_addr(struct bnx2 *bp)
1718 u8 *mac_addr = bp->dev->dev_addr;
1720 val = (mac_addr[0] << 8) | mac_addr[1];
1722 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1724 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1725 (mac_addr[4] << 8) | mac_addr[5];
1727 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
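/*
 * Sketch of the register packing above: the 6-byte MAC address is split
 * 2/4 across the two MATCH registers, big-endian within each word.  For
 * 00:10:18:aa:bb:cc this yields hi = 0x00000010 and lo = 0x18aabbcc.
 */
static void mac_match_pack_sketch(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = ((u32) mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];
}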
1731 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1733 struct sk_buff *skb;
1734 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1736 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1737 unsigned long align;
1739 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1744 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1745 skb_reserve(skb, BNX2_RX_ALIGN - align);
1747 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1748 PCI_DMA_FROMDEVICE);
1751 pci_unmap_addr_set(rx_buf, mapping, mapping);
1753 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1754 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1756 bp->rx_prod_bseq += bp->rx_buf_use_size;
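/*
 * Sketch of the descriptor address split used above: the 64-bit DMA
 * address is stored as two 32-bit halves in the buffer descriptor so the
 * layout is identical on 32- and 64-bit hosts.
 */
static void bd_set_addr_sketch(dma_addr_t mapping, u32 *hi, u32 *lo)
{
	*hi = (u64) mapping >> 32;
	*lo = (u64) mapping & 0xffffffff;
}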
1762 bnx2_phy_int(struct bnx2 *bp)
1764 u32 new_link_state, old_link_state;
1766 new_link_state = bp->status_blk->status_attn_bits &
1767 STATUS_ATTN_BITS_LINK_STATE;
1768 old_link_state = bp->status_blk->status_attn_bits_ack &
1769 STATUS_ATTN_BITS_LINK_STATE;
1770 if (new_link_state != old_link_state) {
1771 if (new_link_state) {
1772 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1773 STATUS_ATTN_BITS_LINK_STATE);
1776 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1777 STATUS_ATTN_BITS_LINK_STATE);
1784 bnx2_tx_int(struct bnx2 *bp)
1786 struct status_block *sblk = bp->status_blk;
1787 u16 hw_cons, sw_cons, sw_ring_cons;
1790 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1791 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1794 sw_cons = bp->tx_cons;
1796 while (sw_cons != hw_cons) {
1797 struct sw_bd *tx_buf;
1798 struct sk_buff *skb;
1801 sw_ring_cons = TX_RING_IDX(sw_cons);
1803 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1806 /* partial BD completions possible with TSO packets */
1807 if (skb_is_gso(skb)) {
1808 u16 last_idx, last_ring_idx;
1810 last_idx = sw_cons +
1811 skb_shinfo(skb)->nr_frags + 1;
1812 last_ring_idx = sw_ring_cons +
1813 skb_shinfo(skb)->nr_frags + 1;
1814 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1817 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1822 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1823 skb_headlen(skb), PCI_DMA_TODEVICE);
1826 last = skb_shinfo(skb)->nr_frags;
1828 for (i = 0; i < last; i++) {
1829 sw_cons = NEXT_TX_BD(sw_cons);
1831 pci_unmap_page(bp->pdev,
1833 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1835 skb_shinfo(skb)->frags[i].size,
1839 sw_cons = NEXT_TX_BD(sw_cons);
1841 tx_free_bd += last + 1;
1845 hw_cons = bp->hw_tx_cons =
1846 sblk->status_tx_quick_consumer_index0;
1848 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1853 bp->tx_cons = sw_cons;
1854 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1855 * before checking for netif_queue_stopped(). Without the
1856 * memory barrier, there is a small possibility that bnx2_start_xmit()
1857 * will miss it and cause the queue to be stopped forever.
1858 */
1859 smp_mb();
1861 if (unlikely(netif_queue_stopped(bp->dev)) &&
1862 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1863 netif_tx_lock(bp->dev);
1864 if ((netif_queue_stopped(bp->dev)) &&
1865 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1866 netif_wake_queue(bp->dev);
1867 netif_tx_unlock(bp->dev);
1872 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1875 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1876 struct rx_bd *cons_bd, *prod_bd;
1878 cons_rx_buf = &bp->rx_buf_ring[cons];
1879 prod_rx_buf = &bp->rx_buf_ring[prod];
1881 pci_dma_sync_single_for_device(bp->pdev,
1882 pci_unmap_addr(cons_rx_buf, mapping),
1883 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1885 bp->rx_prod_bseq += bp->rx_buf_use_size;
1887 prod_rx_buf->skb = skb;
1892 pci_unmap_addr_set(prod_rx_buf, mapping,
1893 pci_unmap_addr(cons_rx_buf, mapping));
1895 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1896 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1897 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1898 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1902 bnx2_rx_int(struct bnx2 *bp, int budget)
1904 struct status_block *sblk = bp->status_blk;
1905 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1906 struct l2_fhdr *rx_hdr;
1909 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1910 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1913 sw_cons = bp->rx_cons;
1914 sw_prod = bp->rx_prod;
1916 /* Memory barrier necessary as speculative reads of the rx
1917 * buffer can be ahead of the index in the status block
1918 */
1919 rmb();
1920 while (sw_cons != hw_cons) {
1923 struct sw_bd *rx_buf;
1924 struct sk_buff *skb;
1925 dma_addr_t dma_addr;
1927 sw_ring_cons = RX_RING_IDX(sw_cons);
1928 sw_ring_prod = RX_RING_IDX(sw_prod);
1930 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1935 dma_addr = pci_unmap_addr(rx_buf, mapping);
1937 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1938 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1940 rx_hdr = (struct l2_fhdr *) skb->data;
1941 len = rx_hdr->l2_fhdr_pkt_len - 4;
1943 if ((status = rx_hdr->l2_fhdr_status) &
1944 (L2_FHDR_ERRORS_BAD_CRC |
1945 L2_FHDR_ERRORS_PHY_DECODE |
1946 L2_FHDR_ERRORS_ALIGNMENT |
1947 L2_FHDR_ERRORS_TOO_SHORT |
1948 L2_FHDR_ERRORS_GIANT_FRAME)) {
1953 /* Since we don't have a jumbo ring, copy small packets
1954 * if mtu > 1500.
1955 */
1956 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1957 struct sk_buff *new_skb;
1959 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1960 if (new_skb == NULL)
1964 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
1965 new_skb->data, len + 2);
1966 skb_reserve(new_skb, 2);
1967 skb_put(new_skb, len);
1969 bnx2_reuse_rx_skb(bp, skb,
1970 sw_ring_cons, sw_ring_prod);
1974 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1975 pci_unmap_single(bp->pdev, dma_addr,
1976 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1978 skb_reserve(skb, bp->rx_offset);
1983 bnx2_reuse_rx_skb(bp, skb,
1984 sw_ring_cons, sw_ring_prod);
1988 skb->protocol = eth_type_trans(skb, bp->dev);
1990 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1991 (ntohs(skb->protocol) != 0x8100)) {
1998 skb->ip_summed = CHECKSUM_NONE;
2000 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2001 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2003 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2004 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2005 skb->ip_summed = CHECKSUM_UNNECESSARY;
2009 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2010 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2011 rx_hdr->l2_fhdr_vlan_tag);
2015 netif_receive_skb(skb);
2017 bp->dev->last_rx = jiffies;
2021 sw_cons = NEXT_RX_BD(sw_cons);
2022 sw_prod = NEXT_RX_BD(sw_prod);
2024 if (rx_pkt == budget)
2027 /* Refresh hw_cons to see if there is new work */
2028 if (sw_cons == hw_cons) {
2029 hw_cons = bp->hw_rx_cons =
2030 sblk->status_rx_quick_consumer_index0;
2031 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2036 bp->rx_cons = sw_cons;
2037 bp->rx_prod = sw_prod;
2039 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2041 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2049 /* MSI ISR - The only difference between this and the INTx ISR
2050 * is that the MSI interrupt is always serviced.
2051 */
2052 static irqreturn_t
2053 bnx2_msi(int irq, void *dev_instance)
2055 struct net_device *dev = dev_instance;
2056 struct bnx2 *bp = netdev_priv(dev);
2058 prefetch(bp->status_blk);
2059 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2060 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2061 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2063 /* Return here if interrupt is disabled. */
2064 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2067 netif_rx_schedule(dev);
2073 bnx2_interrupt(int irq, void *dev_instance)
2075 struct net_device *dev = dev_instance;
2076 struct bnx2 *bp = netdev_priv(dev);
2078 /* When using INTx, it is possible for the interrupt to arrive
2079 * at the CPU before the status block posted prior to the
2080 * interrupt. Reading a register will flush the status block.
2081 * When using MSI, the MSI message will always complete after
2082 * the status block write.
2083 */
2084 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2085 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2086 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2089 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2090 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2091 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2093 /* Return here if interrupt is shared and is disabled. */
2094 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2097 netif_rx_schedule(dev);
2103 bnx2_has_work(struct bnx2 *bp)
2105 struct status_block *sblk = bp->status_blk;
2107 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2108 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2111 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2112 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2119 bnx2_poll(struct net_device *dev, int *budget)
2121 struct bnx2 *bp = netdev_priv(dev);
2123 if ((bp->status_blk->status_attn_bits &
2124 STATUS_ATTN_BITS_LINK_STATE) !=
2125 (bp->status_blk->status_attn_bits_ack &
2126 STATUS_ATTN_BITS_LINK_STATE)) {
2128 spin_lock(&bp->phy_lock);
2130 spin_unlock(&bp->phy_lock);
2132 /* This is needed to take care of transient status
2133 * during link changes.
2134 */
2135 REG_WR(bp, BNX2_HC_COMMAND,
2136 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2137 REG_RD(bp, BNX2_HC_COMMAND);
2140 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2143 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2144 int orig_budget = *budget;
2147 if (orig_budget > dev->quota)
2148 orig_budget = dev->quota;
2150 work_done = bnx2_rx_int(bp, orig_budget);
2151 *budget -= work_done;
2152 dev->quota -= work_done;
2155 bp->last_status_idx = bp->status_blk->status_idx;
2158 if (!bnx2_has_work(bp)) {
2159 netif_rx_complete(dev);
2160 if (likely(bp->flags & USING_MSI_FLAG)) {
2161 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2162 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2163 bp->last_status_idx);
2166 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2167 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2168 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2169 bp->last_status_idx);
2171 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2172 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2173 bp->last_status_idx);
2180 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2181 * from set_multicast.
2182 */
2183 static void
2184 bnx2_set_rx_mode(struct net_device *dev)
2186 struct bnx2 *bp = netdev_priv(dev);
2187 u32 rx_mode, sort_mode;
2190 spin_lock_bh(&bp->phy_lock);
2192 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2193 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2194 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2195 #ifdef BCM_VLAN
2196 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2197 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2198 #else
2199 if (!(bp->flags & ASF_ENABLE_FLAG))
2200 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2201 #endif
2202 if (dev->flags & IFF_PROMISC) {
2203 /* Promiscuous mode. */
2204 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2205 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2206 BNX2_RPM_SORT_USER0_PROM_VLAN;
2208 else if (dev->flags & IFF_ALLMULTI) {
2209 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2210 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2213 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2216 /* Accept one or more multicast(s). */
2217 struct dev_mc_list *mclist;
2218 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2223 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2225 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2226 i++, mclist = mclist->next) {
2228 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2229 bit = crc & 0xff;
2230 regidx = (bit & 0xe0) >> 5;
2231 bit &= 0x1f;
2232 mc_filter[regidx] |= (1 << bit);
2235 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2236 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2240 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
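/*
 * Sketch of the hash addressing above: the low 8 bits of the
 * little-endian CRC select one of 256 filter bits, i.e. 32-bit register
 * (bit >> 5) of the NUM_MC_HASH_REGISTERS and position (bit & 0x1f)
 * within it.
 */
static void mc_hash_position_sketch(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = crc & 0xff;	/* 256 filter bits in total */

	*regidx = bit >> 5;	/* same as (bit & 0xe0) >> 5 */
	*bitpos = bit & 0x1f;	/* bit within that register */
}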
2243 if (rx_mode != bp->rx_mode) {
2244 bp->rx_mode = rx_mode;
2245 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2248 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2249 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2250 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2252 spin_unlock_bh(&bp->phy_lock);
2255 #define FW_BUF_SIZE 0x8000
2258 bnx2_gunzip_init(struct bnx2 *bp)
2260 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2263 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2266 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2267 if (bp->strm->workspace == NULL)
2277 vfree(bp->gunzip_buf);
2278 bp->gunzip_buf = NULL;
2281 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2282 "uncompression.\n", bp->dev->name);
2287 bnx2_gunzip_end(struct bnx2 *bp)
2289 kfree(bp->strm->workspace);
2294 if (bp->gunzip_buf) {
2295 vfree(bp->gunzip_buf);
2296 bp->gunzip_buf = NULL;
2301 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2305 /* check gzip header */
2306 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2312 if (zbuf[3] & FNAME)
2313 while ((zbuf[n++] != 0) && (n < len));
2315 bp->strm->next_in = zbuf + n;
2316 bp->strm->avail_in = len - n;
2317 bp->strm->next_out = bp->gunzip_buf;
2318 bp->strm->avail_out = FW_BUF_SIZE;
2320 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2324 rc = zlib_inflate(bp->strm, Z_FINISH);
2326 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2327 *outbuf = bp->gunzip_buf;
2329 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2330 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2331 bp->dev->name, bp->strm->msg);
2333 zlib_inflateEnd(bp->strm);
2335 if (rc == Z_STREAM_END)
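/*
 * Hedged sketch of the gzip-header handling above (assumes images
 * without the EXTRA/COMMENT/HCRC optional fields, which is what the
 * check and the FNAME skip imply): 10 fixed header bytes, plus a
 * NUL-terminated file name when FNAME (0x08) is set, after which
 * zlib_inflateInit2() with -MAX_WBITS consumes the raw deflate stream.
 */
static int gzip_payload_offset_sketch(const u8 *z, int len)
{
	int n = 10;				/* fixed gzip header size */

	if (len < n || z[0] != 0x1f || z[1] != 0x8b)
		return -1;			/* not a gzip stream */
	if (z[3] & 0x08)			/* FNAME flag */
		while (n < len && z[n++] != 0)
			;			/* skip NUL-terminated name */
	return n;				/* offset of deflate data */
}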
2342 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2349 for (i = 0; i < rv2p_code_len; i += 8) {
2350 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2352 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2355 if (rv2p_proc == RV2P_PROC1) {
2356 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2357 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2360 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2361 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2365 /* Reset the processor, un-stall is done later. */
2366 if (rv2p_proc == RV2P_PROC1) {
2367 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2370 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2375 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2382 val = REG_RD_IND(bp, cpu_reg->mode);
2383 val |= cpu_reg->mode_value_halt;
2384 REG_WR_IND(bp, cpu_reg->mode, val);
2385 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2387 /* Load the Text area. */
2388 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2393 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2403 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2404 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2408 /* Load the Data area. */
2409 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2413 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2414 REG_WR_IND(bp, offset, fw->data[j]);
2418 /* Load the SBSS area. */
2419 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2423 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2424 REG_WR_IND(bp, offset, fw->sbss[j]);
2428 /* Load the BSS area. */
2429 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2433 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2434 REG_WR_IND(bp, offset, fw->bss[j]);
2438 /* Load the Read-Only area. */
2439 offset = cpu_reg->spad_base +
2440 (fw->rodata_addr - cpu_reg->mips_view_base);
2444 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2445 REG_WR_IND(bp, offset, fw->rodata[j]);
2449 /* Clear the pre-fetch instruction. */
2450 REG_WR_IND(bp, cpu_reg->inst, 0);
2451 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2453 /* Start the CPU. */
2454 val = REG_RD_IND(bp, cpu_reg->mode);
2455 val &= ~cpu_reg->mode_value_halt;
2456 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2457 REG_WR_IND(bp, cpu_reg->mode, val);
2463 bnx2_init_cpus(struct bnx2 *bp)
2465 struct cpu_reg cpu_reg;
2471 if ((rc = bnx2_gunzip_init(bp)) != 0)
2474 /* Initialize the RV2P processor. */
2475 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2480 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2482 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2487 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2489 /* Initialize the RX Processor. */
2490 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2491 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2492 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2493 cpu_reg.state = BNX2_RXP_CPU_STATE;
2494 cpu_reg.state_value_clear = 0xffffff;
2495 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2496 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2497 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2498 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2499 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2500 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2501 cpu_reg.mips_view_base = 0x8000000;
2503 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2504 fw = &bnx2_rxp_fw_09;
2506 fw = &bnx2_rxp_fw_06;
2508 rc = load_cpu_fw(bp, &cpu_reg, fw);
2512 /* Initialize the TX Processor. */
2513 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2514 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2515 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2516 cpu_reg.state = BNX2_TXP_CPU_STATE;
2517 cpu_reg.state_value_clear = 0xffffff;
2518 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2519 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2520 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2521 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2522 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2523 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2524 cpu_reg.mips_view_base = 0x8000000;
2526 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2527 fw = &bnx2_txp_fw_09;
2529 fw = &bnx2_txp_fw_06;
2531 rc = load_cpu_fw(bp, &cpu_reg, fw);
2535 /* Initialize the TX Patch-up Processor. */
2536 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2537 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2538 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2539 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2540 cpu_reg.state_value_clear = 0xffffff;
2541 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2542 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2543 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2544 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2545 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2546 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2547 cpu_reg.mips_view_base = 0x8000000;
2549 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2550 fw = &bnx2_tpat_fw_09;
2552 fw = &bnx2_tpat_fw_06;
2554 rc = load_cpu_fw(bp, &cpu_reg, fw);
2558 /* Initialize the Completion Processor. */
2559 cpu_reg.mode = BNX2_COM_CPU_MODE;
2560 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2561 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2562 cpu_reg.state = BNX2_COM_CPU_STATE;
2563 cpu_reg.state_value_clear = 0xffffff;
2564 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2565 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2566 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2567 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2568 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2569 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2570 cpu_reg.mips_view_base = 0x8000000;
2572 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2573 fw = &bnx2_com_fw_09;
2575 fw = &bnx2_com_fw_06;
2577 rc = load_cpu_fw(bp, &cpu_reg, fw);
2581 /* Initialize the Command Processor. */
2582 cpu_reg.mode = BNX2_CP_CPU_MODE;
2583 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2584 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2585 cpu_reg.state = BNX2_CP_CPU_STATE;
2586 cpu_reg.state_value_clear = 0xffffff;
2587 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2588 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2589 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2590 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2591 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2592 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2593 cpu_reg.mips_view_base = 0x8000000;
2595 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2596 fw = &bnx2_cp_fw_09;
2598 rc = load_cpu_fw(bp, &cpu_reg, fw);
2603 bnx2_gunzip_end(bp);
2608 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2612 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2618 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2619 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2620 PCI_PM_CTRL_PME_STATUS);
2622 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2623 /* delay required during transition out of D3hot */
2626 val = REG_RD(bp, BNX2_EMAC_MODE);
2627 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2628 val &= ~BNX2_EMAC_MODE_MPKT;
2629 REG_WR(bp, BNX2_EMAC_MODE, val);
2631 val = REG_RD(bp, BNX2_RPM_CONFIG);
2632 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2633 REG_WR(bp, BNX2_RPM_CONFIG, val);
2644 autoneg = bp->autoneg;
2645 advertising = bp->advertising;
2647 bp->autoneg = AUTONEG_SPEED;
2648 bp->advertising = ADVERTISED_10baseT_Half |
2649 ADVERTISED_10baseT_Full |
2650 ADVERTISED_100baseT_Half |
2651 ADVERTISED_100baseT_Full |
2654 bnx2_setup_copper_phy(bp);
2656 bp->autoneg = autoneg;
2657 bp->advertising = advertising;
2659 bnx2_set_mac_addr(bp);
2661 val = REG_RD(bp, BNX2_EMAC_MODE);
2663 /* Enable port mode. */
2664 val &= ~BNX2_EMAC_MODE_PORT;
2665 val |= BNX2_EMAC_MODE_PORT_MII |
2666 BNX2_EMAC_MODE_MPKT_RCVD |
2667 BNX2_EMAC_MODE_ACPI_RCVD |
2668 BNX2_EMAC_MODE_MPKT;
2670 REG_WR(bp, BNX2_EMAC_MODE, val);
2672 /* receive all multicast */
2673 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2674 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2677 REG_WR(bp, BNX2_EMAC_RX_MODE,
2678 BNX2_EMAC_RX_MODE_SORT_MODE);
2680 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2681 BNX2_RPM_SORT_USER0_MC_EN;
2682 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2683 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2684 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2685 BNX2_RPM_SORT_USER0_ENA);
2687 /* Need to enable EMAC and RPM for WOL. */
2688 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2689 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2690 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2691 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2693 val = REG_RD(bp, BNX2_RPM_CONFIG);
2694 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2695 REG_WR(bp, BNX2_RPM_CONFIG, val);
2697 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2700 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2703 if (!(bp->flags & NO_WOL_FLAG))
2704 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2706 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2707 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2708 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2717 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2719 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2722 /* No more memory access after this point until
2723 * the device is brought back to D0.
2735 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2740 /* Request access to the flash interface. */
2741 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2742 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2743 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2744 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2750 if (j >= NVRAM_TIMEOUT_COUNT)
2757 bnx2_release_nvram_lock(struct bnx2 *bp)
2762 /* Relinquish nvram interface. */
2763 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2765 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2766 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2767 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2773 if (j >= NVRAM_TIMEOUT_COUNT)
2781 bnx2_enable_nvram_write(struct bnx2 *bp)
2785 val = REG_RD(bp, BNX2_MISC_CFG);
2786 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2788 if (!bp->flash_info->buffered) {
2791 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2792 REG_WR(bp, BNX2_NVM_COMMAND,
2793 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2795 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2798 val = REG_RD(bp, BNX2_NVM_COMMAND);
2799 if (val & BNX2_NVM_COMMAND_DONE)
2803 if (j >= NVRAM_TIMEOUT_COUNT)
2810 bnx2_disable_nvram_write(struct bnx2 *bp)
2814 val = REG_RD(bp, BNX2_MISC_CFG);
2815 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
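/*
 * Editor's illustration (not part of the driver): the NVRAM lock and
 * write-enable helpers above all share one bounded-polling idiom -- kick
 * the hardware, then spin at most NVRAM_TIMEOUT_COUNT times waiting for a
 * status bit.  Generic sketch of that idiom; the 5 usec per-iteration
 * delay is an assumption, the real delay is not shown above:
 */
static int bnx2_poll_bit_sketch(struct bnx2 *bp, u32 reg, u32 bit)
{
	int j;

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		if (REG_RD(bp, reg) & bit)
			return 0;	/* bit observed in time */
		udelay(5);		/* assumed settle time */
	}
	return -EBUSY;			/* hardware never responded */
}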
2820 bnx2_enable_nvram_access(struct bnx2 *bp)
2824 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2825 /* Enable both bits, even on read. */
2826 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2827 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2831 bnx2_disable_nvram_access(struct bnx2 *bp)
2835 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2836 /* Disable both bits, even after read. */
2837 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2838 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2839 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2843 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2848 if (bp->flash_info->buffered)
2849 /* Buffered flash, no erase needed */
2852 /* Build an erase command */
2853 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2854 BNX2_NVM_COMMAND_DOIT;
2856 /* Need to clear DONE bit separately. */
2857 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2859 /* Address of the NVRAM page to erase. */
2860 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2862 /* Issue an erase command. */
2863 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2865 /* Wait for completion. */
2866 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2871 val = REG_RD(bp, BNX2_NVM_COMMAND);
2872 if (val & BNX2_NVM_COMMAND_DONE)
2876 if (j >= NVRAM_TIMEOUT_COUNT)
2883 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2888 /* Build the command word. */
2889 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2891 /* Calculate the page-form offset for a buffered flash. */
2892 if (bp->flash_info->buffered) {
2893 offset = ((offset / bp->flash_info->page_size) <<
2894 bp->flash_info->page_bits) +
2895 (offset % bp->flash_info->page_size);
2898 /* Need to clear DONE bit separately. */
2899 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2901 /* Address of the NVRAM to read from. */
2902 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2904 /* Issue a read command. */
2905 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2907 /* Wait for completion. */
2908 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2913 val = REG_RD(bp, BNX2_NVM_COMMAND);
2914 if (val & BNX2_NVM_COMMAND_DONE) {
2915 val = REG_RD(bp, BNX2_NVM_READ);
2917 val = be32_to_cpu(val);
2918 memcpy(ret_val, &val, 4);
2922 if (j >= NVRAM_TIMEOUT_COUNT)
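/*
 * Editor's illustration (not part of the driver): the "buffered flash"
 * offset computation above turns a linear byte offset into the page/byte
 * address form such parts expect.  Standalone restatement, with a worked
 * example assuming a 264-byte page addressed through 9 page bits (the
 * real values come from bp->flash_info):
 */
static u32 nvram_paged_addr_sketch(u32 offset, u32 page_size, u32 page_bits)
{
	/* page index in the high bits, byte-within-page in the low bits */
	return ((offset / page_size) << page_bits) + (offset % page_size);
}
/* e.g. nvram_paged_addr_sketch(1000, 264, 9) == (3 << 9) + 208 == 1744 */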
2930 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2935 /* Build the command word. */
2936 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2938 /* Calculate the page-form offset for a buffered flash. */
2939 if (bp->flash_info->buffered) {
2940 offset = ((offset / bp->flash_info->page_size) <<
2941 bp->flash_info->page_bits) +
2942 (offset % bp->flash_info->page_size);
2945 /* Need to clear DONE bit separately. */
2946 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2948 memcpy(&val32, val, 4);
2949 val32 = cpu_to_be32(val32);
2951 /* Write the data. */
2952 REG_WR(bp, BNX2_NVM_WRITE, val32);
2954 /* Address of the NVRAM to write to. */
2955 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2957 /* Issue the write command. */
2958 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2960 /* Wait for completion. */
2961 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2964 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2967 if (j >= NVRAM_TIMEOUT_COUNT)
2974 bnx2_init_nvram(struct bnx2 *bp)
2977 int j, entry_count, rc;
2978 struct flash_spec *flash;
2980 /* Determine the selected interface. */
2981 val = REG_RD(bp, BNX2_NVM_CFG1);
2983 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2986 if (val & 0x40000000) {
2988 /* Flash interface has been reconfigured */
2989 for (j = 0, flash = &flash_table[0]; j < entry_count;
2991 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2992 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2993 bp->flash_info = flash;
3000 /* Flash interface has not yet been reconfigured */
3002 if (val & (1 << 23))
3003 mask = FLASH_BACKUP_STRAP_MASK;
3005 mask = FLASH_STRAP_MASK;
3007 for (j = 0, flash = &flash_table[0]; j < entry_count;
3010 if ((val & mask) == (flash->strapping & mask)) {
3011 bp->flash_info = flash;
3013 /* Request access to the flash interface. */
3014 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3017 /* Enable access to flash interface */
3018 bnx2_enable_nvram_access(bp);
3020 /* Reconfigure the flash interface */
3021 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3022 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3023 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3024 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3026 /* Disable access to flash interface */
3027 bnx2_disable_nvram_access(bp);
3028 bnx2_release_nvram_lock(bp);
3033 } /* if (val & 0x40000000) */
3035 if (j == entry_count) {
3036 bp->flash_info = NULL;
3037 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3041 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3042 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3044 bp->flash_size = val;
3046 bp->flash_size = bp->flash_info->total_size;
3052 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3056 u32 cmd_flags, offset32, len32, extra;
3061 /* Request access to the flash interface. */
3062 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3065 /* Enable access to flash interface */
3066 bnx2_enable_nvram_access(bp);
3079 pre_len = 4 - (offset & 3);
3081 if (pre_len >= len32) {
3083 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3084 BNX2_NVM_COMMAND_LAST;
3087 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3090 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3095 memcpy(ret_buf, buf + (offset & 3), pre_len);
3102 extra = 4 - (len32 & 3);
3103 len32 = (len32 + 4) & ~3;
3110 cmd_flags = BNX2_NVM_COMMAND_LAST;
3112 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3113 BNX2_NVM_COMMAND_LAST;
3115 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3117 memcpy(ret_buf, buf, 4 - extra);
3119 else if (len32 > 0) {
3122 /* Read the first word. */
3126 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3128 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3130 /* Advance to the next dword. */
3135 while (len32 > 4 && rc == 0) {
3136 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3138 /* Advance to the next dword. */
3147 cmd_flags = BNX2_NVM_COMMAND_LAST;
3148 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3150 memcpy(ret_buf, buf, 4 - extra);
3153 /* Disable access to flash interface */
3154 bnx2_disable_nvram_access(bp);
3156 bnx2_release_nvram_lock(bp);
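/*
 * Editor's illustration (not part of the driver): bnx2_nvram_read() above
 * widens an arbitrary (offset, len) request to whole dwords.  The head
 * fix-up keeps pre_len bytes of the first dword, and a misaligned length
 * leaves "extra" pad bytes in the last dword that must not be copied out.
 * Sketch of that arithmetic for a request misaligned at both ends:
 */
static void nvram_align_sketch(u32 offset, u32 len, u32 *pre_len, u32 *extra)
{
	*pre_len = (offset & 3) ? 4 - (offset & 3) : 0;	/* head bytes kept */
	*extra = (len & 3) ? 4 - (len & 3) : 0;		/* tail pad bytes */
}
/* e.g. offset = 5, len = 10: pre_len = 3, extra = 2 */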
3162 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3165 u32 written, offset32, len32;
3166 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3168 int align_start, align_end;
3173 align_start = align_end = 0;
3175 if ((align_start = (offset32 & 3))) {
3177 len32 += align_start;
3180 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3185 align_end = 4 - (len32 & 3);
3187 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3191 if (align_start || align_end) {
3192 align_buf = kmalloc(len32, GFP_KERNEL);
3193 if (align_buf == NULL)
3196 memcpy(align_buf, start, 4);
3199 memcpy(align_buf + len32 - 4, end, 4);
3201 memcpy(align_buf + align_start, data_buf, buf_size);
3205 if (bp->flash_info->buffered == 0) {
3206 flash_buffer = kmalloc(264, GFP_KERNEL);
3207 if (flash_buffer == NULL) {
3209 goto nvram_write_end;
3214 while ((written < len32) && (rc == 0)) {
3215 u32 page_start, page_end, data_start, data_end;
3216 u32 addr, cmd_flags;
3219 /* Find the page_start addr */
3220 page_start = offset32 + written;
3221 page_start -= (page_start % bp->flash_info->page_size);
3222 /* Find the page_end addr */
3223 page_end = page_start + bp->flash_info->page_size;
3224 /* Find the data_start addr */
3225 data_start = (written == 0) ? offset32 : page_start;
3226 /* Find the data_end addr */
3227 data_end = (page_end > offset32 + len32) ?
3228 (offset32 + len32) : page_end;
3230 /* Request access to the flash interface. */
3231 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3232 goto nvram_write_end;
3234 /* Enable access to flash interface */
3235 bnx2_enable_nvram_access(bp);
3237 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3238 if (bp->flash_info->buffered == 0) {
3241 /* Read the whole page into the buffer
3242 * (non-buffered flash only) */
3243 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3244 if (j == (bp->flash_info->page_size - 4)) {
3245 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3247 rc = bnx2_nvram_read_dword(bp,
3253 goto nvram_write_end;
3259 /* Enable writes to flash interface (unlock write-protect) */
3260 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3261 goto nvram_write_end;
3263 /* Loop to write back the buffer data from page_start to
3266 if (bp->flash_info->buffered == 0) {
3267 /* Erase the page */
3268 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3269 goto nvram_write_end;
3271 /* Re-enable writes for the actual write */
3272 bnx2_enable_nvram_write(bp);
3274 for (addr = page_start; addr < data_start;
3275 addr += 4, i += 4) {
3277 rc = bnx2_nvram_write_dword(bp, addr,
3278 &flash_buffer[i], cmd_flags);
3281 goto nvram_write_end;
3287 /* Loop to write the new data from data_start to data_end */
3288 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3289 if ((addr == page_end - 4) ||
3290 ((bp->flash_info->buffered) &&
3291 (addr == data_end - 4))) {
3293 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3295 rc = bnx2_nvram_write_dword(bp, addr, buf,
3299 goto nvram_write_end;
3305 /* Loop to write back the buffer data from data_end
3307 if (bp->flash_info->buffered == 0) {
3308 for (addr = data_end; addr < page_end;
3309 addr += 4, i += 4) {
3311 if (addr == page_end-4) {
3312 cmd_flags = BNX2_NVM_COMMAND_LAST;
3314 rc = bnx2_nvram_write_dword(bp, addr,
3315 &flash_buffer[i], cmd_flags);
3318 goto nvram_write_end;
3324 /* Disable writes to flash interface (lock write-protect) */
3325 bnx2_disable_nvram_write(bp);
3327 /* Disable access to flash interface */
3328 bnx2_disable_nvram_access(bp);
3329 bnx2_release_nvram_lock(bp);
3331 /* Account for the bytes just written */
3332 written += data_end - data_start;
3336 kfree(flash_buffer);
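/*
 * Editor's illustration (not part of the driver): each pass of the write
 * loop above computes which slice of the current flash page the caller's
 * data covers, then (for non-buffered parts) does a read / erase /
 * write-back cycle around that slice.  The bounds math, restated:
 */
static void nvram_page_bounds_sketch(u32 offset, u32 len, u32 written,
				     u32 page_size, u32 *page_start,
				     u32 *page_end, u32 *data_start,
				     u32 *data_end)
{
	*page_start = (offset + written) - ((offset + written) % page_size);
	*page_end = *page_start + page_size;
	*data_start = written ? *page_start : offset;
	*data_end = (*page_end > offset + len) ? offset + len : *page_end;
}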
3342 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3347 /* Wait for the current PCI transaction to complete before
3348 * issuing a reset. */
3349 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3350 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3351 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3352 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3353 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3354 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3357 /* Wait for the firmware to tell us it is ok to issue a reset. */
3358 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3360 /* Deposit a driver reset signature so the firmware knows that
3361 * this is a soft reset. */
3362 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3363 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3365 /* Do a dummy read to force the chip to complete all outstanding
3366 * transactions before we issue a reset. */
3367 val = REG_RD(bp, BNX2_MISC_ID);
3369 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3370 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3371 REG_RD(bp, BNX2_MISC_COMMAND);
3374 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3375 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3377 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3380 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3381 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3382 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3385 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3387 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3388 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3389 current->state = TASK_UNINTERRUPTIBLE;
3390 schedule_timeout(HZ / 50);
3393 /* Reset takes approximately 30 usec */
3394 for (i = 0; i < 10; i++) {
3395 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3396 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3397 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3402 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3403 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3404 printk(KERN_ERR PFX "Chip reset did not complete\n");
3409 /* Make sure byte swapping is properly configured. */
3410 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3411 if (val != 0x01020304) {
3412 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3416 /* Wait for the firmware to finish its initialization. */
3417 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3421 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3422 /* Adjust the voltage regulator two steps lower. The default
3423 * value of this register is 0x0000000e. */
3424 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3426 /* Remove bad rbuf memory from the free pool. */
3427 rc = bnx2_alloc_bad_rbuf(bp);
3434 bnx2_init_chip(struct bnx2 *bp)
3439 /* Make sure the interrupt is not active. */
3440 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3442 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3443 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3445 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3447 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3448 DMA_READ_CHANS << 12 |
3449 DMA_WRITE_CHANS << 16;
3451 val |= (0x2 << 20) | (1 << 11);
3453 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3456 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3457 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3458 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3460 REG_WR(bp, BNX2_DMA_CONFIG, val);
3462 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3463 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3464 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3465 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3468 if (bp->flags & PCIX_FLAG) {
3471 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3473 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3474 val16 & ~PCI_X_CMD_ERO);
3477 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3478 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3479 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3480 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3482 /* Initialize context mapping and zero out the quick contexts. The
3483 * context block must have already been enabled. */
3484 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3485 bnx2_init_5709_context(bp);
3487 bnx2_init_context(bp);
3489 if ((rc = bnx2_init_cpus(bp)) != 0)
3492 bnx2_init_nvram(bp);
3494 bnx2_set_mac_addr(bp);
3496 val = REG_RD(bp, BNX2_MQ_CONFIG);
3497 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3498 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3499 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3500 val |= BNX2_MQ_CONFIG_HALT_DIS;
3502 REG_WR(bp, BNX2_MQ_CONFIG, val);
3504 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3505 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3506 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3508 val = (BCM_PAGE_BITS - 8) << 24;
3509 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3511 /* Configure page size. */
3512 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3513 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3514 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3515 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3517 val = bp->mac_addr[0] +
3518 (bp->mac_addr[1] << 8) +
3519 (bp->mac_addr[2] << 16) +
3521 (bp->mac_addr[4] << 8) +
3522 (bp->mac_addr[5] << 16);
3523 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3525 /* Program the MTU. Also include 4 bytes for CRC32. */
3526 val = bp->dev->mtu + ETH_HLEN + 4;
3527 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3528 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3529 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3531 bp->last_status_idx = 0;
3532 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3534 /* Set up how to generate a link change interrupt. */
3535 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3537 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3538 (u64) bp->status_blk_mapping & 0xffffffff);
3539 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3541 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3542 (u64) bp->stats_blk_mapping & 0xffffffff);
3543 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3544 (u64) bp->stats_blk_mapping >> 32);
3546 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3547 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3549 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3550 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3552 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3553 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3555 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3557 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3559 REG_WR(bp, BNX2_HC_COM_TICKS,
3560 (bp->com_ticks_int << 16) | bp->com_ticks);
3562 REG_WR(bp, BNX2_HC_CMD_TICKS,
3563 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3565 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3566 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3568 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3569 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3571 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3572 BNX2_HC_CONFIG_TX_TMR_MODE |
3573 BNX2_HC_CONFIG_COLLECT_STATS);
3576 /* Clear internal stats counters. */
3577 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3579 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3581 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3582 BNX2_PORT_FEATURE_ASF_ENABLED)
3583 bp->flags |= ASF_ENABLE_FLAG;
3585 /* Initialize the receive filter. */
3586 bnx2_set_rx_mode(bp->dev);
3588 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3591 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3592 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3596 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
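/*
 * Editor's illustration (not part of the driver): the host-coalescing
 * registers programmed above pack two 16-bit values into one dword -- the
 * "during interrupt" setting in the high half, the normal setting in the
 * low half.  Restated as a helper:
 */
static u32 hc_pack_sketch(u16 during_int, u16 normal)
{
	return ((u32) during_int << 16) | normal;
}
/* e.g. REG_WR(bp, BNX2_HC_TX_TICKS,
 *	       hc_pack_sketch(bp->tx_ticks_int, bp->tx_ticks)); */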
3602 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3604 u32 val, offset0, offset1, offset2, offset3;
3606 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3607 offset0 = BNX2_L2CTX_TYPE_XI;
3608 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3609 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3610 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3612 offset0 = BNX2_L2CTX_TYPE;
3613 offset1 = BNX2_L2CTX_CMD_TYPE;
3614 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3615 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3617 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3618 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3620 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3621 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3623 val = (u64) bp->tx_desc_mapping >> 32;
3624 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3626 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3627 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
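/*
 * Editor's illustration (not part of the driver): ring base addresses are
 * handed to the chip as 32-bit high/low halves, as in the two CTX_WR
 * calls above.  The split, restated (the u64 cast matters when
 * dma_addr_t is only 32 bits wide):
 */
static void dma_split_sketch(dma_addr_t map, u32 *hi, u32 *lo)
{
	*hi = (u64) map >> 32;
	*lo = (u64) map & 0xffffffff;
}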
3631 bnx2_init_tx_ring(struct bnx2 *bp)
3636 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3638 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3640 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3641 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3646 bp->tx_prod_bseq = 0;
3649 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3650 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3652 bnx2_init_tx_context(bp, cid);
3656 bnx2_init_rx_ring(struct bnx2 *bp)
3660 u16 prod, ring_prod;
3663 /* 8 for CRC and VLAN */
3664 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3666 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3668 ring_prod = prod = bp->rx_prod = 0;
3671 bp->rx_prod_bseq = 0;
3673 for (i = 0; i < bp->rx_max_ring; i++) {
3676 rxbd = &bp->rx_desc_ring[i][0];
3677 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3678 rxbd->rx_bd_len = bp->rx_buf_use_size;
3679 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3681 if (i == (bp->rx_max_ring - 1))
3685 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3686 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3690 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3691 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3693 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3695 val = (u64) bp->rx_desc_mapping[0] >> 32;
3696 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3698 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3699 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3701 for (i = 0; i < bp->rx_ring_size; i++) {
3702 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3705 prod = NEXT_RX_BD(prod);
3706 ring_prod = RX_RING_IDX(prod);
3710 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3712 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3716 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3720 bp->rx_ring_size = size;
3722 while (size > MAX_RX_DESC_CNT) {
3723 size -= MAX_RX_DESC_CNT;
3726 /* round to next power of 2 */
3728 while ((max & num_rings) == 0)
3731 if (num_rings != max)
3734 bp->rx_max_ring = max;
3735 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
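/*
 * Editor's illustration (not part of the driver): the sizing code above
 * rounds the number of RX rings up to a power of two so that ring index
 * math can use masking.  Standalone equivalent of the shift loop:
 */
static u32 roundup_pow2_sketch(u32 n)
{
	u32 p = 1;

	while (p < n)
		p <<= 1;
	return p;		/* e.g. 3 rings -> 4, 4 -> 4, 5 -> 8 */
}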
3739 bnx2_free_tx_skbs(struct bnx2 *bp)
3743 if (bp->tx_buf_ring == NULL)
3746 for (i = 0; i < TX_DESC_CNT; ) {
3747 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3748 struct sk_buff *skb = tx_buf->skb;
3756 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3757 skb_headlen(skb), PCI_DMA_TODEVICE);
3761 last = skb_shinfo(skb)->nr_frags;
3762 for (j = 0; j < last; j++) {
3763 tx_buf = &bp->tx_buf_ring[i + j + 1];
3764 pci_unmap_page(bp->pdev,
3765 pci_unmap_addr(tx_buf, mapping),
3766 skb_shinfo(skb)->frags[j].size,
3776 bnx2_free_rx_skbs(struct bnx2 *bp)
3780 if (bp->rx_buf_ring == NULL)
3783 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3784 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3785 struct sk_buff *skb = rx_buf->skb;
3790 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3791 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3800 bnx2_free_skbs(struct bnx2 *bp)
3802 bnx2_free_tx_skbs(bp);
3803 bnx2_free_rx_skbs(bp);
3807 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3811 rc = bnx2_reset_chip(bp, reset_code);
3816 if ((rc = bnx2_init_chip(bp)) != 0)
3819 bnx2_init_tx_ring(bp);
3820 bnx2_init_rx_ring(bp);
3825 bnx2_init_nic(struct bnx2 *bp)
3829 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3832 spin_lock_bh(&bp->phy_lock);
3834 spin_unlock_bh(&bp->phy_lock);
3840 bnx2_test_registers(struct bnx2 *bp)
3844 static const struct {
3847 #define BNX2_FL_NOT_5709 1
3851 { 0x006c, 0, 0x00000000, 0x0000003f },
3852 { 0x0090, 0, 0xffffffff, 0x00000000 },
3853 { 0x0094, 0, 0x00000000, 0x00000000 },
3855 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
3856 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3857 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3858 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
3859 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
3860 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
3861 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
3862 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3863 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3865 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3866 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3867 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3868 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3869 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3870 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3872 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
3873 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
3874 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
3876 { 0x1000, 0, 0x00000000, 0x00000001 },
3877 { 0x1004, 0, 0x00000000, 0x000f0001 },
3879 { 0x1408, 0, 0x01c00800, 0x00000000 },
3880 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3881 { 0x14a8, 0, 0x00000000, 0x000001ff },
3882 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3883 { 0x14b0, 0, 0x00000002, 0x00000001 },
3884 { 0x14b8, 0, 0x00000000, 0x00000000 },
3885 { 0x14c0, 0, 0x00000000, 0x00000009 },
3886 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3887 { 0x14cc, 0, 0x00000000, 0x00000001 },
3888 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3890 { 0x1800, 0, 0x00000000, 0x00000001 },
3891 { 0x1804, 0, 0x00000000, 0x00000003 },
3893 { 0x2800, 0, 0x00000000, 0x00000001 },
3894 { 0x2804, 0, 0x00000000, 0x00003f01 },
3895 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3896 { 0x2810, 0, 0xffff0000, 0x00000000 },
3897 { 0x2814, 0, 0xffff0000, 0x00000000 },
3898 { 0x2818, 0, 0xffff0000, 0x00000000 },
3899 { 0x281c, 0, 0xffff0000, 0x00000000 },
3900 { 0x2834, 0, 0xffffffff, 0x00000000 },
3901 { 0x2840, 0, 0x00000000, 0xffffffff },
3902 { 0x2844, 0, 0x00000000, 0xffffffff },
3903 { 0x2848, 0, 0xffffffff, 0x00000000 },
3904 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3906 { 0x2c00, 0, 0x00000000, 0x00000011 },
3907 { 0x2c04, 0, 0x00000000, 0x00030007 },
3909 { 0x3c00, 0, 0x00000000, 0x00000001 },
3910 { 0x3c04, 0, 0x00000000, 0x00070000 },
3911 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3912 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3913 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3914 { 0x3c14, 0, 0x00000000, 0xffffffff },
3915 { 0x3c18, 0, 0x00000000, 0xffffffff },
3916 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3917 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3919 { 0x5004, 0, 0x00000000, 0x0000007f },
3920 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3922 { 0x5c00, 0, 0x00000000, 0x00000001 },
3923 { 0x5c04, 0, 0x00000000, 0x0003000f },
3924 { 0x5c08, 0, 0x00000003, 0x00000000 },
3925 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3926 { 0x5c10, 0, 0x00000000, 0xffffffff },
3927 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3928 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3929 { 0x5c88, 0, 0x00000000, 0x00077373 },
3930 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3932 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3933 { 0x680c, 0, 0xffffffff, 0x00000000 },
3934 { 0x6810, 0, 0xffffffff, 0x00000000 },
3935 { 0x6814, 0, 0xffffffff, 0x00000000 },
3936 { 0x6818, 0, 0xffffffff, 0x00000000 },
3937 { 0x681c, 0, 0xffffffff, 0x00000000 },
3938 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3939 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3940 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3941 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3942 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3943 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3944 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3945 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3946 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3947 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3948 { 0x684c, 0, 0xffffffff, 0x00000000 },
3949 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3950 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3951 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3952 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3953 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3954 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3956 { 0xffff, 0, 0x00000000, 0x00000000 },
3961 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3964 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3965 u32 offset, rw_mask, ro_mask, save_val, val;
3966 u16 flags = reg_tbl[i].flags;
3968 if (is_5709 && (flags & BNX2_FL_NOT_5709))
3971 offset = (u32) reg_tbl[i].offset;
3972 rw_mask = reg_tbl[i].rw_mask;
3973 ro_mask = reg_tbl[i].ro_mask;
3975 save_val = readl(bp->regview + offset);
3977 writel(0, bp->regview + offset);
3979 val = readl(bp->regview + offset);
3980 if ((val & rw_mask) != 0) {
3984 if ((val & ro_mask) != (save_val & ro_mask)) {
3988 writel(0xffffffff, bp->regview + offset);
3990 val = readl(bp->regview + offset);
3991 if ((val & rw_mask) != rw_mask) {
3995 if ((val & ro_mask) != (save_val & ro_mask)) {
3999 writel(save_val, bp->regview + offset);
4003 writel(save_val, bp->regview + offset);
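/*
 * Editor's illustration (not part of the driver): each register above is
 * probed twice -- write 0, then write all-ones -- and passes only if the
 * read-write bits follow the written value while the read-only bits keep
 * their saved value.  The pass condition, restated:
 */
static int reg_probe_ok_sketch(u32 val, u32 wrote, u32 rw_mask, u32 ro_mask,
			       u32 save_val)
{
	return ((val & rw_mask) == (wrote & rw_mask)) &&
	       ((val & ro_mask) == (save_val & ro_mask));
}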
4011 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4013 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4014 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4017 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4020 for (offset = 0; offset < size; offset += 4) {
4022 REG_WR_IND(bp, start + offset, test_pattern[i]);
4024 if (REG_RD_IND(bp, start + offset) !=
4034 bnx2_test_memory(struct bnx2 *bp)
4038 static struct mem_entry {
4041 } mem_tbl_5706[] = {
4042 { 0x60000, 0x4000 },
4043 { 0xa0000, 0x3000 },
4044 { 0xe0000, 0x4000 },
4045 { 0x120000, 0x4000 },
4046 { 0x1a0000, 0x4000 },
4047 { 0x160000, 0x4000 },
4051 { 0x60000, 0x4000 },
4052 { 0xa0000, 0x3000 },
4053 { 0xe0000, 0x4000 },
4054 { 0x120000, 0x4000 },
4055 { 0x1a0000, 0x4000 },
4058 struct mem_entry *mem_tbl;
4060 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4061 mem_tbl = mem_tbl_5709;
4063 mem_tbl = mem_tbl_5706;
4065 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4066 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4067 mem_tbl[i].len)) != 0) {
4075 #define BNX2_MAC_LOOPBACK 0
4076 #define BNX2_PHY_LOOPBACK 1
4079 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4081 unsigned int pkt_size, num_pkts, i;
4082 struct sk_buff *skb, *rx_skb;
4083 unsigned char *packet;
4084 u16 rx_start_idx, rx_idx;
4087 struct sw_bd *rx_buf;
4088 struct l2_fhdr *rx_hdr;
4091 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4092 bp->loopback = MAC_LOOPBACK;
4093 bnx2_set_mac_loopback(bp);
4095 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4096 bp->loopback = PHY_LOOPBACK;
4097 bnx2_set_phy_loopback(bp);
4103 skb = netdev_alloc_skb(bp->dev, pkt_size);
4106 packet = skb_put(skb, pkt_size);
4107 memcpy(packet, bp->dev->dev_addr, 6);
4108 memset(packet + 6, 0x0, 8);
4109 for (i = 14; i < pkt_size; i++)
4110 packet[i] = (unsigned char) (i & 0xff);
4112 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4115 REG_WR(bp, BNX2_HC_COMMAND,
4116 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4118 REG_RD(bp, BNX2_HC_COMMAND);
4121 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4125 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4127 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4128 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4129 txbd->tx_bd_mss_nbytes = pkt_size;
4130 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4133 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4134 bp->tx_prod_bseq += pkt_size;
4136 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4137 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4141 REG_WR(bp, BNX2_HC_COMMAND,
4142 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4144 REG_RD(bp, BNX2_HC_COMMAND);
4148 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4151 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4152 goto loopback_test_done;
4155 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4156 if (rx_idx != rx_start_idx + num_pkts) {
4157 goto loopback_test_done;
4160 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4161 rx_skb = rx_buf->skb;
4163 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4164 skb_reserve(rx_skb, bp->rx_offset);
4166 pci_dma_sync_single_for_cpu(bp->pdev,
4167 pci_unmap_addr(rx_buf, mapping),
4168 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4170 if (rx_hdr->l2_fhdr_status &
4171 (L2_FHDR_ERRORS_BAD_CRC |
4172 L2_FHDR_ERRORS_PHY_DECODE |
4173 L2_FHDR_ERRORS_ALIGNMENT |
4174 L2_FHDR_ERRORS_TOO_SHORT |
4175 L2_FHDR_ERRORS_GIANT_FRAME)) {
4177 goto loopback_test_done;
4180 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4181 goto loopback_test_done;
4184 for (i = 14; i < pkt_size; i++) {
4185 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4186 goto loopback_test_done;
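/*
 * Editor's illustration (not part of the driver): the test frame built and
 * verified above uses the device's own MAC as destination, zeroes the rest
 * of the Ethernet header, and fills the payload with an incrementing byte
 * pattern so corruption is detectable at any offset.  Restated:
 */
static void fill_loopback_frame_sketch(u8 *buf, const u8 *dev_addr,
				       unsigned int pkt_size)
{
	unsigned int i;

	memcpy(buf, dev_addr, 6);		/* DA = our own address */
	memset(buf + 6, 0x0, 8);		/* SA + ethertype cleared */
	for (i = 14; i < pkt_size; i++)
		buf[i] = (unsigned char) (i & 0xff);
}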
4197 #define BNX2_MAC_LOOPBACK_FAILED 1
4198 #define BNX2_PHY_LOOPBACK_FAILED 2
4199 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4200 BNX2_PHY_LOOPBACK_FAILED)
4203 bnx2_test_loopback(struct bnx2 *bp)
4207 if (!netif_running(bp->dev))
4208 return BNX2_LOOPBACK_FAILED;
4210 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4211 spin_lock_bh(&bp->phy_lock);
4213 spin_unlock_bh(&bp->phy_lock);
4214 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4215 rc |= BNX2_MAC_LOOPBACK_FAILED;
4216 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4217 rc |= BNX2_PHY_LOOPBACK_FAILED;
4221 #define NVRAM_SIZE 0x200
4222 #define CRC32_RESIDUAL 0xdebb20e3
4225 bnx2_test_nvram(struct bnx2 *bp)
4227 u32 buf[NVRAM_SIZE / 4];
4228 u8 *data = (u8 *) buf;
4232 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4233 goto test_nvram_done;
4235 magic = be32_to_cpu(buf[0]);
4236 if (magic != 0x669955aa) {
4238 goto test_nvram_done;
4241 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4242 goto test_nvram_done;
4244 csum = ether_crc_le(0x100, data);
4245 if (csum != CRC32_RESIDUAL) {
4247 goto test_nvram_done;
4250 csum = ether_crc_le(0x100, data + 0x100);
4251 if (csum != CRC32_RESIDUAL) {
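/*
 * Editor's illustration (not part of the driver): a block that carries its
 * own little-endian CRC32 at the end always checksums to the fixed
 * residual 0xdebb20e3, which is why the test above never needs to know
 * where the stored CRC sits within the block.  The check, restated:
 */
static int nvram_block_ok_sketch(u8 *block, int len_including_crc)
{
	return ether_crc_le(len_including_crc, block) == CRC32_RESIDUAL;
}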
4260 bnx2_test_link(struct bnx2 *bp)
4264 spin_lock_bh(&bp->phy_lock);
4265 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
4266 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
4267 spin_unlock_bh(&bp->phy_lock);
4269 if (bmsr & BMSR_LSTATUS) {
4276 bnx2_test_intr(struct bnx2 *bp)
4281 if (!netif_running(bp->dev))
4284 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4286 /* This register is not touched during run-time. */
4287 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4288 REG_RD(bp, BNX2_HC_COMMAND);
4290 for (i = 0; i < 10; i++) {
4291 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4297 msleep_interruptible(10);
4306 bnx2_5706_serdes_timer(struct bnx2 *bp)
4308 spin_lock(&bp->phy_lock);
4309 if (bp->serdes_an_pending)
4310 bp->serdes_an_pending--;
4311 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4314 bp->current_interval = bp->timer_interval;
4316 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4318 if (bmcr & BMCR_ANENABLE) {
4321 bnx2_write_phy(bp, 0x1c, 0x7c00);
4322 bnx2_read_phy(bp, 0x1c, &phy1);
4324 bnx2_write_phy(bp, 0x17, 0x0f01);
4325 bnx2_read_phy(bp, 0x15, &phy2);
4326 bnx2_write_phy(bp, 0x17, 0x0f01);
4327 bnx2_read_phy(bp, 0x15, &phy2);
4329 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4330 !(phy2 & 0x20)) { /* no CONFIG */
4332 bmcr &= ~BMCR_ANENABLE;
4333 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4334 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4335 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4339 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4340 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4343 bnx2_write_phy(bp, 0x17, 0x0f01);
4344 bnx2_read_phy(bp, 0x15, &phy2);
4348 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4349 bmcr |= BMCR_ANENABLE;
4350 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4352 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4355 bp->current_interval = bp->timer_interval;
4357 spin_unlock(&bp->phy_lock);
4361 bnx2_5708_serdes_timer(struct bnx2 *bp)
4363 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4364 bp->serdes_an_pending = 0;
4368 spin_lock(&bp->phy_lock);
4369 if (bp->serdes_an_pending)
4370 bp->serdes_an_pending--;
4371 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4374 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4375 if (bmcr & BMCR_ANENABLE) {
4376 bnx2_enable_forced_2g5(bp);
4377 bp->current_interval = SERDES_FORCED_TIMEOUT;
4379 bnx2_disable_forced_2g5(bp);
4380 bp->serdes_an_pending = 2;
4381 bp->current_interval = bp->timer_interval;
4385 bp->current_interval = bp->timer_interval;
4387 spin_unlock(&bp->phy_lock);
4391 bnx2_timer(unsigned long data)
4393 struct bnx2 *bp = (struct bnx2 *) data;
4396 if (!netif_running(bp->dev))
4399 if (atomic_read(&bp->intr_sem) != 0)
4400 goto bnx2_restart_timer;
4402 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4403 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4405 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4407 if (bp->phy_flags & PHY_SERDES_FLAG) {
4408 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4409 bnx2_5706_serdes_timer(bp);
4410 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4411 bnx2_5708_serdes_timer(bp);
4415 mod_timer(&bp->timer, jiffies + bp->current_interval);
4418 /* Called with rtnl_lock */
4420 bnx2_open(struct net_device *dev)
4422 struct bnx2 *bp = netdev_priv(dev);
4425 netif_carrier_off(dev);
4427 bnx2_set_power_state(bp, PCI_D0);
4428 bnx2_disable_int(bp);
4430 rc = bnx2_alloc_mem(bp);
4434 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4435 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4438 if (pci_enable_msi(bp->pdev) == 0) {
4439 bp->flags |= USING_MSI_FLAG;
4440 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4444 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4445 IRQF_SHARED, dev->name, dev);
4449 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4457 rc = bnx2_init_nic(bp);
4460 free_irq(bp->pdev->irq, dev);
4461 if (bp->flags & USING_MSI_FLAG) {
4462 pci_disable_msi(bp->pdev);
4463 bp->flags &= ~USING_MSI_FLAG;
4470 mod_timer(&bp->timer, jiffies + bp->current_interval);
4472 atomic_set(&bp->intr_sem, 0);
4474 bnx2_enable_int(bp);
4476 if (bp->flags & USING_MSI_FLAG) {
4477 /* Test MSI to make sure it is working.
4478 * If the MSI test fails, go back to INTx mode.
4480 if (bnx2_test_intr(bp) != 0) {
4481 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4482 " using MSI, switching to INTx mode. Please"
4483 " report this failure to the PCI maintainer"
4484 " and include system chipset information.\n",
4487 bnx2_disable_int(bp);
4488 free_irq(bp->pdev->irq, dev);
4489 pci_disable_msi(bp->pdev);
4490 bp->flags &= ~USING_MSI_FLAG;
4492 rc = bnx2_init_nic(bp);
4495 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4496 IRQF_SHARED, dev->name, dev);
4501 del_timer_sync(&bp->timer);
4504 bnx2_enable_int(bp);
4507 if (bp->flags & USING_MSI_FLAG) {
4508 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4511 netif_start_queue(dev);
4517 bnx2_reset_task(struct work_struct *work)
4519 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4521 if (!netif_running(bp->dev))
4524 bp->in_reset_task = 1;
4525 bnx2_netif_stop(bp);
4529 atomic_set(&bp->intr_sem, 1);
4530 bnx2_netif_start(bp);
4531 bp->in_reset_task = 0;
4535 bnx2_tx_timeout(struct net_device *dev)
4537 struct bnx2 *bp = netdev_priv(dev);
4539 /* This allows the netif to be shut down gracefully before resetting */
4540 schedule_work(&bp->reset_task);
4544 /* Called with rtnl_lock */
4546 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4548 struct bnx2 *bp = netdev_priv(dev);
4550 bnx2_netif_stop(bp);
4553 bnx2_set_rx_mode(dev);
4555 bnx2_netif_start(bp);
4558 /* Called with rtnl_lock */
4560 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4562 struct bnx2 *bp = netdev_priv(dev);
4564 bnx2_netif_stop(bp);
4565 vlan_group_set_device(bp->vlgrp, vid, NULL);
4566 bnx2_set_rx_mode(dev);
4568 bnx2_netif_start(bp);
4572 /* Called with netif_tx_lock.
4573 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4574 * netif_wake_queue().
4577 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4579 struct bnx2 *bp = netdev_priv(dev);
4582 struct sw_bd *tx_buf;
4583 u32 len, vlan_tag_flags, last_frag, mss;
4584 u16 prod, ring_prod;
4587 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4588 netif_stop_queue(dev);
4589 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4592 return NETDEV_TX_BUSY;
4594 len = skb_headlen(skb);
4596 ring_prod = TX_RING_IDX(prod);
4599 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4600 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4603 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4605 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4607 if ((mss = skb_shinfo(skb)->gso_size) &&
4608 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4609 u32 tcp_opt_len, ip_tcp_len;
4612 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4614 tcp_opt_len = tcp_optlen(skb);
4616 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4617 u32 tcp_off = skb_transport_offset(skb) -
4618 sizeof(struct ipv6hdr) - ETH_HLEN;
4620 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4621 TX_BD_FLAGS_SW_FLAGS;
4622 if (likely(tcp_off == 0))
4623 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4626 vlan_tag_flags |= ((tcp_off & 0x3) <<
4627 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4628 ((tcp_off & 0x10) <<
4629 TX_BD_FLAGS_TCP6_OFF4_SHL);
4630 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4633 if (skb_header_cloned(skb) &&
4634 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4636 return NETDEV_TX_OK;
4639 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4643 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4644 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4648 if (tcp_opt_len || (iph->ihl > 5)) {
4649 vlan_tag_flags |= ((iph->ihl - 5) +
4650 (tcp_opt_len >> 2)) << 8;
4656 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4658 tx_buf = &bp->tx_buf_ring[ring_prod];
4660 pci_unmap_addr_set(tx_buf, mapping, mapping);
4662 txbd = &bp->tx_desc_ring[ring_prod];
4664 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4665 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4666 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4667 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4669 last_frag = skb_shinfo(skb)->nr_frags;
4671 for (i = 0; i < last_frag; i++) {
4672 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4674 prod = NEXT_TX_BD(prod);
4675 ring_prod = TX_RING_IDX(prod);
4676 txbd = &bp->tx_desc_ring[ring_prod];
4679 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4680 len, PCI_DMA_TODEVICE);
4681 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4684 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4685 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4686 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4687 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4690 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4692 prod = NEXT_TX_BD(prod);
4693 bp->tx_prod_bseq += skb->len;
4695 REG_WR16(bp, bp->tx_bidx_addr, prod);
4696 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4701 dev->trans_start = jiffies;
4703 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4704 netif_stop_queue(dev);
4705 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4706 netif_wake_queue(dev);
4709 return NETDEV_TX_OK;
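/*
 * Editor's illustration (not part of the driver): bp->tx_prod above is a
 * free-running 16-bit sequence; TX_RING_IDX() masks it down to a slot.
 * Simplified sketch of that advance, assuming a power-of-two ring -- the
 * real NEXT_TX_BD() additionally skips the chain descriptor at the end of
 * each descriptor page:
 */
static u16 tx_advance_sketch(u16 prod, u16 ring_mask, u16 *ring_idx)
{
	prod++;				/* sequence number, never masked */
	*ring_idx = prod & ring_mask;	/* slot actually used in the ring */
	return prod;
}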
4712 /* Called with rtnl_lock */
4714 bnx2_close(struct net_device *dev)
4716 struct bnx2 *bp = netdev_priv(dev);
4719 /* Calling flush_scheduled_work() may deadlock because
4720 * linkwatch_event() may be on the workqueue and it will try to get
4721 * the rtnl_lock which we are holding.
4723 while (bp->in_reset_task)
4726 bnx2_netif_stop(bp);
4727 del_timer_sync(&bp->timer);
4728 if (bp->flags & NO_WOL_FLAG)
4729 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4731 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4733 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4734 bnx2_reset_chip(bp, reset_code);
4735 free_irq(bp->pdev->irq, dev);
4736 if (bp->flags & USING_MSI_FLAG) {
4737 pci_disable_msi(bp->pdev);
4738 bp->flags &= ~USING_MSI_FLAG;
4743 netif_carrier_off(bp->dev);
4744 bnx2_set_power_state(bp, PCI_D3hot);
4748 #define GET_NET_STATS64(ctr) \
4749 (((unsigned long) (ctr##_hi) << 32) + \
4750 (unsigned long) (ctr##_lo))
4752 #define GET_NET_STATS32(ctr) \
4755 #if (BITS_PER_LONG == 64)
4756 #define GET_NET_STATS GET_NET_STATS64
4758 #define GET_NET_STATS GET_NET_STATS32
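/*
 * Editor's illustration (not part of the driver): on 64-bit hosts the
 * hi/lo statistics counter halves combine as below; on 32-bit hosts
 * GET_NET_STATS32 keeps only the low half, since unsigned long cannot
 * hold the high half there.
 */
static unsigned long stats64_sketch(u32 hi, u32 lo)
{
	/* assumes BITS_PER_LONG == 64, like GET_NET_STATS64 itself */
	return ((unsigned long) hi << 32) | lo;
}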
4761 static struct net_device_stats *
4762 bnx2_get_stats(struct net_device *dev)
4764 struct bnx2 *bp = netdev_priv(dev);
4765 struct statistics_block *stats_blk = bp->stats_blk;
4766 struct net_device_stats *net_stats = &bp->net_stats;
4768 if (bp->stats_blk == NULL) {
4771 net_stats->rx_packets =
4772 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4773 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4774 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4776 net_stats->tx_packets =
4777 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4778 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4779 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4781 net_stats->rx_bytes =
4782 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4784 net_stats->tx_bytes =
4785 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4787 net_stats->multicast =
4788 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4790 net_stats->collisions =
4791 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4793 net_stats->rx_length_errors =
4794 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4795 stats_blk->stat_EtherStatsOverrsizePkts);
4797 net_stats->rx_over_errors =
4798 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4800 net_stats->rx_frame_errors =
4801 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4803 net_stats->rx_crc_errors =
4804 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4806 net_stats->rx_errors = net_stats->rx_length_errors +
4807 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4808 net_stats->rx_crc_errors;
4810 net_stats->tx_aborted_errors =
4811 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4812 stats_blk->stat_Dot3StatsLateCollisions);
4814 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4815 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4816 net_stats->tx_carrier_errors = 0;
4818 net_stats->tx_carrier_errors =
4820 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4823 net_stats->tx_errors =
4825 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4827 net_stats->tx_aborted_errors +
4828 net_stats->tx_carrier_errors;
4830 net_stats->rx_missed_errors =
4831 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4832 stats_blk->stat_FwRxDrop);
4837 /* All ethtool functions called with rtnl_lock */
4840 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4842 struct bnx2 *bp = netdev_priv(dev);
4844 cmd->supported = SUPPORTED_Autoneg;
4845 if (bp->phy_flags & PHY_SERDES_FLAG) {
4846 cmd->supported |= SUPPORTED_1000baseT_Full |
4848 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
4849 cmd->supported |= SUPPORTED_2500baseX_Full;
4851 cmd->port = PORT_FIBRE;
4854 cmd->supported |= SUPPORTED_10baseT_Half |
4855 SUPPORTED_10baseT_Full |
4856 SUPPORTED_100baseT_Half |
4857 SUPPORTED_100baseT_Full |
4858 SUPPORTED_1000baseT_Full |
4861 cmd->port = PORT_TP;
4864 cmd->advertising = bp->advertising;
4866 if (bp->autoneg & AUTONEG_SPEED) {
4867 cmd->autoneg = AUTONEG_ENABLE;
4870 cmd->autoneg = AUTONEG_DISABLE;
4873 if (netif_carrier_ok(dev)) {
4874 cmd->speed = bp->line_speed;
4875 cmd->duplex = bp->duplex;
4882 cmd->transceiver = XCVR_INTERNAL;
4883 cmd->phy_address = bp->phy_addr;
4889 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4891 struct bnx2 *bp = netdev_priv(dev);
4892 u8 autoneg = bp->autoneg;
4893 u8 req_duplex = bp->req_duplex;
4894 u16 req_line_speed = bp->req_line_speed;
4895 u32 advertising = bp->advertising;
4897 if (cmd->autoneg == AUTONEG_ENABLE) {
4898 autoneg |= AUTONEG_SPEED;
4900 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4902 /* allow advertising a single speed */
4903 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4904 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4905 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4906 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4908 if (bp->phy_flags & PHY_SERDES_FLAG)
4911 advertising = cmd->advertising;
4914 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4915 advertising = cmd->advertising;
4917 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4921 if (bp->phy_flags & PHY_SERDES_FLAG) {
4922 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4925 advertising = ETHTOOL_ALL_COPPER_SPEED;
4928 advertising |= ADVERTISED_Autoneg;
4931 if (bp->phy_flags & PHY_SERDES_FLAG) {
4932 if ((cmd->speed != SPEED_1000 &&
4933 cmd->speed != SPEED_2500) ||
4934 (cmd->duplex != DUPLEX_FULL))
4937 if (cmd->speed == SPEED_2500 &&
4938 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4941 else if (cmd->speed == SPEED_1000) {
4944 autoneg &= ~AUTONEG_SPEED;
4945 req_line_speed = cmd->speed;
4946 req_duplex = cmd->duplex;
4950 bp->autoneg = autoneg;
4951 bp->advertising = advertising;
4952 bp->req_line_speed = req_line_speed;
4953 bp->req_duplex = req_duplex;
4955 spin_lock_bh(&bp->phy_lock);
4959 spin_unlock_bh(&bp->phy_lock);
4965 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4967 struct bnx2 *bp = netdev_priv(dev);
4969 strcpy(info->driver, DRV_MODULE_NAME);
4970 strcpy(info->version, DRV_MODULE_VERSION);
4971 strcpy(info->bus_info, pci_name(bp->pdev));
4972 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4973 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4974 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4975 info->fw_version[1] = info->fw_version[3] = '.';
4976 info->fw_version[5] = 0;
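/*
 * Editor's illustration (not part of the driver): bp->fw_ver packs one
 * version field per byte, and the formatting above expands the top three
 * bytes into "X.Y.Z".  Restated (the "+ '0'" conversion assumes each
 * field is a single decimal digit):
 */
static void fw_ver_string_sketch(u32 fw_ver, char out[6])
{
	out[0] = ((fw_ver >> 24) & 0xff) + '0';
	out[1] = '.';
	out[2] = ((fw_ver >> 16) & 0xff) + '0';
	out[3] = '.';
	out[4] = ((fw_ver >> 8) & 0xff) + '0';
	out[5] = '\0';
}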
4979 #define BNX2_REGDUMP_LEN (32 * 1024)
4982 bnx2_get_regs_len(struct net_device *dev)
4984 return BNX2_REGDUMP_LEN;
4988 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4990 u32 *p = _p, i, offset;
4992 struct bnx2 *bp = netdev_priv(dev);
4993 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4994 0x0800, 0x0880, 0x0c00, 0x0c10,
4995 0x0c30, 0x0d08, 0x1000, 0x101c,
4996 0x1040, 0x1048, 0x1080, 0x10a4,
4997 0x1400, 0x1490, 0x1498, 0x14f0,
4998 0x1500, 0x155c, 0x1580, 0x15dc,
4999 0x1600, 0x1658, 0x1680, 0x16d8,
5000 0x1800, 0x1820, 0x1840, 0x1854,
5001 0x1880, 0x1894, 0x1900, 0x1984,
5002 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5003 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5004 0x2000, 0x2030, 0x23c0, 0x2400,
5005 0x2800, 0x2820, 0x2830, 0x2850,
5006 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5007 0x3c00, 0x3c94, 0x4000, 0x4010,
5008 0x4080, 0x4090, 0x43c0, 0x4458,
5009 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5010 0x4fc0, 0x5010, 0x53c0, 0x5444,
5011 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5012 0x5fc0, 0x6000, 0x6400, 0x6428,
5013 0x6800, 0x6848, 0x684c, 0x6860,
5014 0x6888, 0x6910, 0x8000 };
5018 memset(p, 0, BNX2_REGDUMP_LEN);
5020 if (!netif_running(bp->dev))
5024 offset = reg_boundaries[0];
5026 while (offset < BNX2_REGDUMP_LEN) {
5027 *p++ = REG_RD(bp, offset);
5029 if (offset == reg_boundaries[i + 1]) {
5030 offset = reg_boundaries[i + 2];
5031 p = (u32 *) (orig_p + offset);
5038 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5040 struct bnx2 *bp = netdev_priv(dev);
5042 if (bp->flags & NO_WOL_FLAG) {
5047 wol->supported = WAKE_MAGIC;
5049 wol->wolopts = WAKE_MAGIC;
5053 memset(&wol->sopass, 0, sizeof(wol->sopass));
5057 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5059 struct bnx2 *bp = netdev_priv(dev);
5061 if (wol->wolopts & ~WAKE_MAGIC)
5064 if (wol->wolopts & WAKE_MAGIC) {
5065 if (bp->flags & NO_WOL_FLAG)
5077 bnx2_nway_reset(struct net_device *dev)
5079 struct bnx2 *bp = netdev_priv(dev);
5082 if (!(bp->autoneg & AUTONEG_SPEED)) {
5086 spin_lock_bh(&bp->phy_lock);
5088 /* Force a link down visible on the other side */
5089 if (bp->phy_flags & PHY_SERDES_FLAG) {
5090 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5091 spin_unlock_bh(&bp->phy_lock);
5095 spin_lock_bh(&bp->phy_lock);
5097 bp->current_interval = SERDES_AN_TIMEOUT;
5098 bp->serdes_an_pending = 1;
5099 mod_timer(&bp->timer, jiffies + bp->current_interval);
5102 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5103 bmcr &= ~BMCR_LOOPBACK;
5104 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5106 spin_unlock_bh(&bp->phy_lock);
5112 bnx2_get_eeprom_len(struct net_device *dev)
5114 struct bnx2 *bp = netdev_priv(dev);
5116 if (bp->flash_info == NULL)
5119 return (int) bp->flash_size;
5123 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5126 struct bnx2 *bp = netdev_priv(dev);
5129 /* parameters already validated in ethtool_get_eeprom */
5131 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5137 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5140 struct bnx2 *bp = netdev_priv(dev);
5143 /* parameters already validated in ethtool_set_eeprom */
5145 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5151 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5153 struct bnx2 *bp = netdev_priv(dev);
5155 memset(coal, 0, sizeof(struct ethtool_coalesce));
5157 coal->rx_coalesce_usecs = bp->rx_ticks;
5158 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5159 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5160 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5162 coal->tx_coalesce_usecs = bp->tx_ticks;
5163 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5164 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5165 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5167 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5169 return 0;
5170 }
5172 static int
5173 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5174 {
5175 struct bnx2 *bp = netdev_priv(dev);
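/* Each value below is clamped to the width of its hardware field:
* tick values to 10 bits (0x3ff), frame counts to 8 bits (0xff).
*/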
5177 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5178 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5180 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5181 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5183 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5184 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5186 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5187 if (bp->rx_quick_cons_trip_int > 0xff)
5188 bp->rx_quick_cons_trip_int = 0xff;
5190 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5191 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5193 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5194 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5196 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5197 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5199 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5200 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5201 0xff;
5203 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5204 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5205 bp->stats_ticks &= 0xffff00;
5207 if (netif_running(bp->dev)) {
5208 bnx2_netif_stop(bp);
5209 bnx2_init_nic(bp);
5210 bnx2_netif_start(bp);
5211 }
5213 return 0;
5214 }
5216 static void
5217 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5218 {
5219 struct bnx2 *bp = netdev_priv(dev);
5221 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5222 ering->rx_mini_max_pending = 0;
5223 ering->rx_jumbo_max_pending = 0;
5225 ering->rx_pending = bp->rx_ring_size;
5226 ering->rx_mini_pending = 0;
5227 ering->rx_jumbo_pending = 0;
5229 ering->tx_max_pending = MAX_TX_DESC_CNT;
5230 ering->tx_pending = bp->tx_ring_size;
5231 }
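/* Note for bnx2_set_ringparam below: tx_pending must exceed
* MAX_SKB_FRAGS so a maximally fragmented skb always fits in the
* tx ring.
*/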
5233 static int
5234 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5235 {
5236 struct bnx2 *bp = netdev_priv(dev);
5238 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5239 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5240 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5242 return -EINVAL;
5243 }
5244 if (netif_running(bp->dev)) {
5245 bnx2_netif_stop(bp);
5246 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5247 bnx2_free_skbs(bp);
5248 bnx2_free_mem(bp);
5249 }
5251 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5252 bp->tx_ring_size = ering->tx_pending;
5254 if (netif_running(bp->dev)) {
5255 int rc;
5257 rc = bnx2_alloc_mem(bp);
5258 if (rc)
5259 return rc;
5260 bnx2_init_nic(bp);
5261 bnx2_netif_start(bp);
5262 }
5264 return 0;
5265 }
5267 static void
5268 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5269 {
5270 struct bnx2 *bp = netdev_priv(dev);
5272 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5273 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5274 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5275 }
5277 static int
5278 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5279 {
5280 struct bnx2 *bp = netdev_priv(dev);
5282 bp->req_flow_ctrl = 0;
5283 if (epause->rx_pause)
5284 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5285 if (epause->tx_pause)
5286 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5288 if (epause->autoneg) {
5289 bp->autoneg |= AUTONEG_FLOW_CTRL;
5290 }
5291 else {
5292 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5293 }
5295 spin_lock_bh(&bp->phy_lock);
5297 bnx2_setup_phy(bp);
5299 spin_unlock_bh(&bp->phy_lock);
5301 return 0;
5302 }
5304 static u32
5305 bnx2_get_rx_csum(struct net_device *dev)
5306 {
5307 struct bnx2 *bp = netdev_priv(dev);
5309 return bp->rx_csum;
5310 }
5312 static int
5313 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5314 {
5315 struct bnx2 *bp = netdev_priv(dev);
5317 bp->rx_csum = data;
5318 return 0;
5319 }
5321 static int
5322 bnx2_set_tso(struct net_device *dev, u32 data)
5323 {
5324 struct bnx2 *bp = netdev_priv(dev);
5326 if (data) {
5327 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5328 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5329 dev->features |= NETIF_F_TSO6;
5330 } else
5331 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5332 NETIF_F_TSO_ECN);
5333 return 0;
5334 }
5336 #define BNX2_NUM_STATS 46
5338 static struct {
5339 char string[ETH_GSTRING_LEN];
5340 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5341 { "rx_bytes" },
5342 { "rx_error_bytes" },
5344 { "tx_error_bytes" },
5345 { "rx_ucast_packets" },
5346 { "rx_mcast_packets" },
5347 { "rx_bcast_packets" },
5348 { "tx_ucast_packets" },
5349 { "tx_mcast_packets" },
5350 { "tx_bcast_packets" },
5351 { "tx_mac_errors" },
5352 { "tx_carrier_errors" },
5353 { "rx_crc_errors" },
5354 { "rx_align_errors" },
5355 { "tx_single_collisions" },
5356 { "tx_multi_collisions" },
5358 { "tx_excess_collisions" },
5359 { "tx_late_collisions" },
5360 { "tx_total_collisions" },
5363 { "rx_undersize_packets" },
5364 { "rx_oversize_packets" },
5365 { "rx_64_byte_packets" },
5366 { "rx_65_to_127_byte_packets" },
5367 { "rx_128_to_255_byte_packets" },
5368 { "rx_256_to_511_byte_packets" },
5369 { "rx_512_to_1023_byte_packets" },
5370 { "rx_1024_to_1522_byte_packets" },
5371 { "rx_1523_to_9022_byte_packets" },
5372 { "tx_64_byte_packets" },
5373 { "tx_65_to_127_byte_packets" },
5374 { "tx_128_to_255_byte_packets" },
5375 { "tx_256_to_511_byte_packets" },
5376 { "tx_512_to_1023_byte_packets" },
5377 { "tx_1024_to_1522_byte_packets" },
5378 { "tx_1523_to_9022_byte_packets" },
5379 { "rx_xon_frames" },
5380 { "rx_xoff_frames" },
5381 { "tx_xon_frames" },
5382 { "tx_xoff_frames" },
5383 { "rx_mac_ctrl_frames" },
5384 { "rx_filtered_packets" },
5386 { "rx_fw_discards" },
5389 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
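/* The statistics block is an array of 32-bit words maintained by
* firmware, so each offset is expressed as a word index (byte
* offset / 4).
*/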
5391 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5392 STATS_OFFSET32(stat_IfHCInOctets_hi),
5393 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5394 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5395 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5396 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5397 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5398 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5399 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5400 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5401 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5402 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5403 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5404 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5405 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5406 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5407 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5408 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5409 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5410 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5411 STATS_OFFSET32(stat_EtherStatsCollisions),
5412 STATS_OFFSET32(stat_EtherStatsFragments),
5413 STATS_OFFSET32(stat_EtherStatsJabbers),
5414 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5415 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5416 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5417 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5418 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5419 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5420 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5421 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5422 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5423 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5424 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5425 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5426 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5427 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5428 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5429 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5430 STATS_OFFSET32(stat_XonPauseFramesReceived),
5431 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5432 STATS_OFFSET32(stat_OutXonSent),
5433 STATS_OFFSET32(stat_OutXoffSent),
5434 STATS_OFFSET32(stat_MacControlFramesReceived),
5435 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5436 STATS_OFFSET32(stat_IfInMBUFDiscards),
5437 STATS_OFFSET32(stat_FwRxDrop),
5438 };
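/* Width tables for the counters above: 8 means a 64-bit counter
* stored as two 32-bit words (_hi then _lo), 4 a plain 32-bit
* counter, and 0 a counter skipped on that chip.
*/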
5440 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5441 * skipped because of errata.
5442 */
5443 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5444 8,0,8,8,8,8,8,8,8,8,
5445 4,0,4,4,4,4,4,4,4,4,
5446 4,4,4,4,4,4,4,4,4,4,
5447 4,4,4,4,4,4,4,4,4,4,
5448 4,4,4,4,4,4,
5449 };
5451 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5452 8,0,8,8,8,8,8,8,8,8,
5453 4,4,4,4,4,4,4,4,4,4,
5454 4,4,4,4,4,4,4,4,4,4,
5455 4,4,4,4,4,4,4,4,4,4,
5456 4,4,4,4,4,4,
5457 };
5459 #define BNX2_NUM_TESTS 6
5461 static struct {
5462 char string[ETH_GSTRING_LEN];
5463 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5464 { "register_test (offline)" },
5465 { "memory_test (offline)" },
5466 { "loopback_test (offline)" },
5467 { "nvram_test (online)" },
5468 { "interrupt_test (online)" },
5469 { "link_test (online)" },
5473 bnx2_self_test_count(struct net_device *dev)
5475 return BNX2_NUM_TESTS;
5479 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5481 struct bnx2 *bp = netdev_priv(dev);
5483 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
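/* Each slot in buf[] lines up with bnx2_tests_str_arr; a nonzero
* value marks the corresponding test as failed. Offline tests take
* the NIC down first.
*/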
5484 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5485 int i;
5487 bnx2_netif_stop(bp);
5488 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5489 bnx2_free_skbs(bp);
5491 if (bnx2_test_registers(bp) != 0) {
5492 buf[0] = 1;
5493 etest->flags |= ETH_TEST_FL_FAILED;
5494 }
5495 if (bnx2_test_memory(bp) != 0) {
5496 buf[1] = 1;
5497 etest->flags |= ETH_TEST_FL_FAILED;
5498 }
5499 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5500 etest->flags |= ETH_TEST_FL_FAILED;
5502 if (!netif_running(bp->dev)) {
5503 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5504 }
5505 else {
5506 bnx2_init_nic(bp);
5507 bnx2_netif_start(bp);
5508 }
5510 /* wait for link up */
5511 for (i = 0; i < 7; i++) {
5512 if (bp->link_up)
5513 break;
5514 msleep_interruptible(1000);
5515 }
5516 }
5518 if (bnx2_test_nvram(bp) != 0) {
5519 buf[3] = 1;
5520 etest->flags |= ETH_TEST_FL_FAILED;
5521 }
5522 if (bnx2_test_intr(bp) != 0) {
5523 buf[4] = 1;
5524 etest->flags |= ETH_TEST_FL_FAILED;
5525 }
5527 if (bnx2_test_link(bp) != 0) {
5528 buf[5] = 1;
5529 etest->flags |= ETH_TEST_FL_FAILED;
5530 }
5531 }
5534 static void
5535 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5536 {
5537 switch (stringset) {
5538 case ETH_SS_STATS:
5539 memcpy(buf, bnx2_stats_str_arr,
5540 sizeof(bnx2_stats_str_arr));
5541 break;
5542 case ETH_SS_TEST:
5543 memcpy(buf, bnx2_tests_str_arr,
5544 sizeof(bnx2_tests_str_arr));
5545 break;
5546 }
5547 }
5549 static int
5550 bnx2_get_stats_count(struct net_device *dev)
5551 {
5552 return BNX2_NUM_STATS;
5553 }
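/* Stats are copied straight out of the shared statistics block;
* 64-bit counters are reassembled from their _hi word and the _lo
* word that follows it.
*/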
5555 static void
5556 bnx2_get_ethtool_stats(struct net_device *dev,
5557 struct ethtool_stats *stats, u64 *buf)
5558 {
5559 struct bnx2 *bp = netdev_priv(dev);
5560 int i;
5561 u32 *hw_stats = (u32 *) bp->stats_blk;
5562 u8 *stats_len_arr = NULL;
5564 if (hw_stats == NULL) {
5565 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5566 return;
5567 }
5569 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5570 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5571 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5572 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5573 stats_len_arr = bnx2_5706_stats_len_arr;
5574 else
5575 stats_len_arr = bnx2_5708_stats_len_arr;
5577 for (i = 0; i < BNX2_NUM_STATS; i++) {
5578 if (stats_len_arr[i] == 0) {
5579 /* skip this counter */
5580 buf[i] = 0;
5581 continue;
5582 }
5583 if (stats_len_arr[i] == 4) {
5584 /* 4-byte counter */
5585 buf[i] = (u64)
5586 *(hw_stats + bnx2_stats_offset_arr[i]);
5587 continue;
5588 }
5589 /* 8-byte counter */
5590 buf[i] = (((u64) *(hw_stats +
5591 bnx2_stats_offset_arr[i])) << 32) +
5592 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5593 }
5594 }
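/* Identify the adapter by blinking its LED: alternate all-off and
* all-on every 500 ms for roughly data seconds, or until a signal
* interrupts the wait.
*/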
5596 static int
5597 bnx2_phys_id(struct net_device *dev, u32 data)
5598 {
5599 struct bnx2 *bp = netdev_priv(dev);
5600 int i;
5601 u32 save;
5603 if (data == 0)
5604 data = 2;
5606 save = REG_RD(bp, BNX2_MISC_CFG);
5607 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5609 for (i = 0; i < (data * 2); i++) {
5610 if ((i % 2) == 0) {
5611 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5612 }
5613 else {
5614 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5615 BNX2_EMAC_LED_1000MB_OVERRIDE |
5616 BNX2_EMAC_LED_100MB_OVERRIDE |
5617 BNX2_EMAC_LED_10MB_OVERRIDE |
5618 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5619 BNX2_EMAC_LED_TRAFFIC);
5620 }
5621 msleep_interruptible(500);
5622 if (signal_pending(current))
5623 break;
5624 }
5625 REG_WR(bp, BNX2_EMAC_LED, 0);
5626 REG_WR(bp, BNX2_MISC_CFG, save);
5627 return 0;
5628 }
5630 static int
5631 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5632 {
5633 struct bnx2 *bp = netdev_priv(dev);
5635 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5636 return (ethtool_op_set_tx_hw_csum(dev, data));
5637 else
5638 return (ethtool_op_set_tx_csum(dev, data));
5639 }
5641 static const struct ethtool_ops bnx2_ethtool_ops = {
5642 .get_settings = bnx2_get_settings,
5643 .set_settings = bnx2_set_settings,
5644 .get_drvinfo = bnx2_get_drvinfo,
5645 .get_regs_len = bnx2_get_regs_len,
5646 .get_regs = bnx2_get_regs,
5647 .get_wol = bnx2_get_wol,
5648 .set_wol = bnx2_set_wol,
5649 .nway_reset = bnx2_nway_reset,
5650 .get_link = ethtool_op_get_link,
5651 .get_eeprom_len = bnx2_get_eeprom_len,
5652 .get_eeprom = bnx2_get_eeprom,
5653 .set_eeprom = bnx2_set_eeprom,
5654 .get_coalesce = bnx2_get_coalesce,
5655 .set_coalesce = bnx2_set_coalesce,
5656 .get_ringparam = bnx2_get_ringparam,
5657 .set_ringparam = bnx2_set_ringparam,
5658 .get_pauseparam = bnx2_get_pauseparam,
5659 .set_pauseparam = bnx2_set_pauseparam,
5660 .get_rx_csum = bnx2_get_rx_csum,
5661 .set_rx_csum = bnx2_set_rx_csum,
5662 .get_tx_csum = ethtool_op_get_tx_csum,
5663 .set_tx_csum = bnx2_set_tx_csum,
5664 .get_sg = ethtool_op_get_sg,
5665 .set_sg = ethtool_op_set_sg,
5666 .get_tso = ethtool_op_get_tso,
5667 .set_tso = bnx2_set_tso,
5668 .self_test_count = bnx2_self_test_count,
5669 .self_test = bnx2_self_test,
5670 .get_strings = bnx2_get_strings,
5671 .phys_id = bnx2_phys_id,
5672 .get_stats_count = bnx2_get_stats_count,
5673 .get_ethtool_stats = bnx2_get_ethtool_stats,
5674 .get_perm_addr = ethtool_op_get_perm_addr,
5675 };
5677 /* Called with rtnl_lock */
5678 static int
5679 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5680 {
5681 struct mii_ioctl_data *data = if_mii(ifr);
5682 struct bnx2 *bp = netdev_priv(dev);
5683 int err;
5685 switch(cmd) {
5686 case SIOCGMIIPHY:
5687 data->phy_id = bp->phy_addr;
5689 /* fallthru */
5690 case SIOCGMIIREG: {
5691 u32 mii_regval;
5693 if (!netif_running(dev))
5694 return -EAGAIN;
5696 spin_lock_bh(&bp->phy_lock);
5697 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5698 spin_unlock_bh(&bp->phy_lock);
5700 data->val_out = mii_regval;
5702 return err;
5703 }
5705 case SIOCSMIIREG:
5706 if (!capable(CAP_NET_ADMIN))
5707 return -EPERM;
5709 if (!netif_running(dev))
5710 return -EAGAIN;
5712 spin_lock_bh(&bp->phy_lock);
5713 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5714 spin_unlock_bh(&bp->phy_lock);
5716 return err;
5718 default:
5719 /* do nothing */
5720 break;
5721 }
5723 return -EOPNOTSUPP;
5724 }
5725 /* Called with rtnl_lock */
5726 static int
5727 bnx2_change_mac_addr(struct net_device *dev, void *p)
5728 {
5729 struct sockaddr *addr = p;
5730 struct bnx2 *bp = netdev_priv(dev);
5732 if (!is_valid_ether_addr(addr->sa_data))
5733 return -EINVAL;
5735 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5736 if (netif_running(dev))
5737 bnx2_set_mac_addr(bp);
5739 return 0;
5740 }
5742 /* Called with rtnl_lock */
5743 static int
5744 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5745 {
5746 struct bnx2 *bp = netdev_priv(dev);
5748 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5749 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5750 return -EINVAL;
5752 dev->mtu = new_mtu;
5753 if (netif_running(dev)) {
5754 bnx2_netif_stop(bp);
5756 bnx2_init_nic(bp);
5758 bnx2_netif_start(bp);
5759 }
5760 return 0;
5761 }
5763 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5764 static void
5765 poll_bnx2(struct net_device *dev)
5766 {
5767 struct bnx2 *bp = netdev_priv(dev);
5769 disable_irq(bp->pdev->irq);
5770 bnx2_interrupt(bp->pdev->irq, dev);
5771 enable_irq(bp->pdev->irq);
5772 }
5773 #endif
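/* The 5709 is a dual-media part; the bond ID or strap bits in
* MISC_DUAL_MEDIA_CTRL decide, per PCI function, whether the port
* is copper or SerDes.
*/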
5775 static void __devinit
5776 bnx2_get_5709_media(struct bnx2 *bp)
5777 {
5778 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5779 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5780 u32 strap;
5782 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5783 return;
5784 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5785 bp->phy_flags |= PHY_SERDES_FLAG;
5786 return;
5787 }
5789 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5790 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5791 else
5792 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5794 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5795 switch (strap) {
5796 case 0x4:
5797 case 0x5:
5798 case 0x6:
5799 bp->phy_flags |= PHY_SERDES_FLAG;
5800 return;
5801 }
5802 } else {
5803 switch (strap) {
5804 case 0x1:
5805 case 0x2:
5806 case 0x4:
5807 bp->phy_flags |= PHY_SERDES_FLAG;
5808 return;
5809 }
5810 }
5811 }
5813 static int __devinit
5814 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5815 {
5816 struct bnx2 *bp;
5817 unsigned long mem_len;
5818 int rc;
5819 u32 reg;
5820 u64 dma_mask, persist_dma_mask;
5822 SET_MODULE_OWNER(dev);
5823 SET_NETDEV_DEV(dev, &pdev->dev);
5824 bp = netdev_priv(dev);
5826 bp->flags = 0;
5827 bp->phy_flags = 0;
5829 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5830 rc = pci_enable_device(pdev);
5831 if (rc) {
5832 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
5833 goto err_out;
5834 }
5836 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5837 dev_err(&pdev->dev,
5838 "Cannot find PCI device base address, aborting.\n");
5839 rc = -ENODEV;
5840 goto err_out_disable;
5841 }
5843 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5844 if (rc) {
5845 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5846 goto err_out_disable;
5847 }
5849 pci_set_master(pdev);
5851 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5852 if (bp->pm_cap == 0) {
5853 dev_err(&pdev->dev,
5854 "Cannot find power management capability, aborting.\n");
5855 rc = -EIO;
5856 goto err_out_release;
5857 }
5859 bp->dev = dev;
5860 bp->pdev = pdev;
5862 spin_lock_init(&bp->phy_lock);
5863 INIT_WORK(&bp->reset_task, bnx2_reset_task);
5865 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5866 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5867 dev->mem_end = dev->mem_start + mem_len;
5868 dev->irq = pdev->irq;
5870 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5872 if (!bp->regview) {
5873 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5874 rc = -ENOMEM;
5875 goto err_out_release;
5876 }
5878 /* Configure byte swap and enable write to the reg_window registers.
5879 * Rely on CPU to do target byte swapping on big endian systems
5880 * The chip's target access swapping will not swap all accesses
5881 */
5882 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5883 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5884 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5886 bnx2_set_power_state(bp, PCI_D0);
5888 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5890 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5891 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5892 if (bp->pcix_cap == 0) {
5893 dev_err(&pdev->dev,
5894 "Cannot find PCIX capability, aborting.\n");
5895 rc = -EIO;
5896 goto err_out_unmap;
5897 }
5898 }
5900 /* 5708 cannot support DMA addresses > 40-bit. */
5901 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5902 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5903 else
5904 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5906 /* Configure DMA attributes. */
5907 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5908 dev->features |= NETIF_F_HIGHDMA;
5909 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5910 if (rc) {
5911 dev_err(&pdev->dev,
5912 "pci_set_consistent_dma_mask failed, aborting.\n");
5913 goto err_out_unmap;
5914 }
5915 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5916 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5917 goto err_out_unmap;
5918 }
5920 /* Get bus information. */
5921 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5922 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5923 u32 clkreg;
5925 bp->flags |= PCIX_FLAG;
5927 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5929 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5930 switch (clkreg) {
5931 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5932 bp->bus_speed_mhz = 133;
5933 break;
5935 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5936 bp->bus_speed_mhz = 100;
5937 break;
5939 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5940 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5941 bp->bus_speed_mhz = 66;
5942 break;
5944 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5945 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5946 bp->bus_speed_mhz = 50;
5947 break;
5949 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5950 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5951 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5952 bp->bus_speed_mhz = 33;
5953 break;
5954 }
5955 }
5956 else {
5957 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5958 bp->bus_speed_mhz = 66;
5959 else
5960 bp->bus_speed_mhz = 33;
5961 }
5963 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5964 bp->flags |= PCI_32BIT_FLAG;
5966 /* 5706A0 may falsely detect SERR and PERR. */
5967 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5968 reg = REG_RD(bp, PCI_COMMAND);
5969 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5970 REG_WR(bp, PCI_COMMAND, reg);
5971 }
5972 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5973 !(bp->flags & PCIX_FLAG)) {
5975 dev_err(&pdev->dev,
5976 "5706 A1 can only be used in a PCIX bus, aborting.\n");
5977 rc = -EPERM;
5978 goto err_out_unmap;
5979 }
5980 bnx2_init_nvram(bp);
5982 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5984 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5985 BNX2_SHM_HDR_SIGNATURE_SIG) {
5986 u32 off = PCI_FUNC(pdev->devfn) << 2;
5988 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5989 } else
5990 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5992 /* Get the permanent MAC address. First we need to make sure the
5993 * firmware is actually running.
5994 */
5995 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5997 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5998 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5999 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6004 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6006 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6007 bp->mac_addr[0] = (u8) (reg >> 8);
6008 bp->mac_addr[1] = (u8) reg;
6010 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6011 bp->mac_addr[2] = (u8) (reg >> 24);
6012 bp->mac_addr[3] = (u8) (reg >> 16);
6013 bp->mac_addr[4] = (u8) (reg >> 8);
6014 bp->mac_addr[5] = (u8) reg;
6016 bp->tx_ring_size = MAX_TX_DESC_CNT;
6017 bnx2_set_rx_ring_size(bp, 255);
6019 bp->rx_csum = 1;
6021 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6023 bp->tx_quick_cons_trip_int = 20;
6024 bp->tx_quick_cons_trip = 20;
6025 bp->tx_ticks_int = 80;
6026 bp->tx_ticks = 80;
6028 bp->rx_quick_cons_trip_int = 6;
6029 bp->rx_quick_cons_trip = 6;
6030 bp->rx_ticks_int = 18;
6031 bp->rx_ticks = 18;
6033 bp->stats_ticks = 1000000 & 0xffff00;
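/* Default coalescing: tx after 20 frames or 80 us, rx after 6 frames
* or 18 us; the statistics block is refreshed once per second.
*/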
6035 bp->timer_interval = HZ;
6036 bp->current_interval = HZ;
6038 bp->phy_addr = 1;
6040 /* Disable WOL support if we are running on a SERDES chip. */
6041 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6042 bnx2_get_5709_media(bp);
6043 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6044 bp->phy_flags |= PHY_SERDES_FLAG;
6046 if (bp->phy_flags & PHY_SERDES_FLAG) {
6047 bp->flags |= NO_WOL_FLAG;
6048 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6049 bp->phy_addr = 2;
6050 reg = REG_RD_IND(bp, bp->shmem_base +
6051 BNX2_SHARED_HW_CFG_CONFIG);
6052 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6053 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6054 }
6055 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6056 CHIP_NUM(bp) == CHIP_NUM_5708)
6057 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6058 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6059 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6061 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6062 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6063 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6064 bp->flags |= NO_WOL_FLAG;
6066 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6067 bp->tx_quick_cons_trip_int =
6068 bp->tx_quick_cons_trip;
6069 bp->tx_ticks_int = bp->tx_ticks;
6070 bp->rx_quick_cons_trip_int =
6071 bp->rx_quick_cons_trip;
6072 bp->rx_ticks_int = bp->rx_ticks;
6073 bp->comp_prod_trip_int = bp->comp_prod_trip;
6074 bp->com_ticks_int = bp->com_ticks;
6075 bp->cmd_ticks_int = bp->cmd_ticks;
6076 }
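/* On the 5706 A0 the interrupt-mode coalescing parameters are kept
* identical to the normal ones, presumably an early-silicon
* limitation.
*/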
6078 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6080 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6081 * with byte enables disabled on the unused 32-bit word. This is legal
6082 * but causes problems on the AMD 8132 which will eventually stop
6083 * responding after a while.
6085 * AMD believes this incompatibility is unique to the 5706, and
6086 * prefers to locally disable MSI rather than globally disabling it.
6087 */
6088 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6089 struct pci_dev *amd_8132 = NULL;
6091 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6092 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6093 amd_8132))) {
6094 u8 rev;
6096 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6097 if (rev >= 0x10 && rev <= 0x13) {
6098 disable_msi = 1;
6099 pci_dev_put(amd_8132);
6100 break;
6101 }
6102 }
6103 }
6105 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6106 bp->req_line_speed = 0;
6107 if (bp->phy_flags & PHY_SERDES_FLAG) {
6108 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6110 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6111 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6112 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6113 bp->autoneg = 0;
6114 bp->req_line_speed = bp->line_speed = SPEED_1000;
6115 bp->req_duplex = DUPLEX_FULL;
6116 }
6117 }
6118 else {
6119 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6120 }
6122 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6124 init_timer(&bp->timer);
6125 bp->timer.expires = RUN_AT(bp->timer_interval);
6126 bp->timer.data = (unsigned long) bp;
6127 bp->timer.function = bnx2_timer;
6129 return 0;
6131 err_out_unmap:
6132 if (bp->regview) {
6133 iounmap(bp->regview);
6134 bp->regview = NULL;
6135 }
6137 err_out_release:
6138 pci_release_regions(pdev);
6140 err_out_disable:
6141 pci_disable_device(pdev);
6142 pci_set_drvdata(pdev, NULL);
6144 err_out:
6145 return rc;
6146 }
6148 static int __devinit
6149 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6150 {
6151 static int version_printed = 0;
6152 struct net_device *dev = NULL;
6153 struct bnx2 *bp;
6154 int rc, i;
6156 if (version_printed++ == 0)
6157 printk(KERN_INFO "%s", version);
6159 /* dev zeroed in init_etherdev */
6160 dev = alloc_etherdev(sizeof(*bp));
6162 if (!dev)
6163 return -ENOMEM;
6165 rc = bnx2_init_board(pdev, dev);
6166 if (rc < 0) {
6167 free_netdev(dev);
6168 return rc;
6169 }
6171 dev->open = bnx2_open;
6172 dev->hard_start_xmit = bnx2_start_xmit;
6173 dev->stop = bnx2_close;
6174 dev->get_stats = bnx2_get_stats;
6175 dev->set_multicast_list = bnx2_set_rx_mode;
6176 dev->do_ioctl = bnx2_ioctl;
6177 dev->set_mac_address = bnx2_change_mac_addr;
6178 dev->change_mtu = bnx2_change_mtu;
6179 dev->tx_timeout = bnx2_tx_timeout;
6180 dev->watchdog_timeo = TX_TIMEOUT;
6181 #ifdef BCM_VLAN
6182 dev->vlan_rx_register = bnx2_vlan_rx_register;
6183 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6184 #endif
6185 dev->poll = bnx2_poll;
6186 dev->ethtool_ops = &bnx2_ethtool_ops;
6187 dev->weight = 64;
6189 bp = netdev_priv(dev);
6191 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6192 dev->poll_controller = poll_bnx2;
6193 #endif
6195 pci_set_drvdata(pdev, dev);
6197 memcpy(dev->dev_addr, bp->mac_addr, 6);
6198 memcpy(dev->perm_addr, bp->mac_addr, 6);
6199 bp->name = board_info[ent->driver_data].name;
6201 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6202 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6204 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6205 #ifdef BCM_VLAN
6206 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6207 #endif
6208 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6209 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6210 dev->features |= NETIF_F_TSO6;
6212 if ((rc = register_netdev(dev))) {
6213 dev_err(&pdev->dev, "Cannot register net device\n");
6214 if (bp->regview)
6215 iounmap(bp->regview);
6216 pci_release_regions(pdev);
6217 pci_disable_device(pdev);
6218 pci_set_drvdata(pdev, NULL);
6219 free_netdev(dev);
6220 return rc;
6221 }
6223 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6224 "IRQ %d, ",
6225 dev->name,
6226 bp->name,
6227 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6228 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6229 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6230 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6231 bp->bus_speed_mhz,
6232 dev->base_addr,
6233 bp->pdev->irq);
6235 printk("node addr ");
6236 for (i = 0; i < 6; i++)
6237 printk("%2.2x", dev->dev_addr[i]);
6238 printk("\n");
6240 return 0;
6241 }
6243 static void __devexit
6244 bnx2_remove_one(struct pci_dev *pdev)
6245 {
6246 struct net_device *dev = pci_get_drvdata(pdev);
6247 struct bnx2 *bp = netdev_priv(dev);
6249 flush_scheduled_work();
6251 unregister_netdev(dev);
6253 if (bp->regview)
6254 iounmap(bp->regview);
6256 free_netdev(dev);
6257 pci_release_regions(pdev);
6258 pci_disable_device(pdev);
6259 pci_set_drvdata(pdev, NULL);
6260 }
6262 static int
6263 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6264 {
6265 struct net_device *dev = pci_get_drvdata(pdev);
6266 struct bnx2 *bp = netdev_priv(dev);
6267 u32 reset_code;
6269 if (!netif_running(dev))
6270 return 0;
6272 flush_scheduled_work();
6273 bnx2_netif_stop(bp);
6274 netif_device_detach(dev);
6275 del_timer_sync(&bp->timer);
6276 if (bp->flags & NO_WOL_FLAG)
6277 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6278 else if (bp->wol)
6279 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6280 else
6281 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6282 bnx2_reset_chip(bp, reset_code);
6284 pci_save_state(pdev);
6285 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6286 return 0;
6287 }
6289 static int
6290 bnx2_resume(struct pci_dev *pdev)
6291 {
6292 struct net_device *dev = pci_get_drvdata(pdev);
6293 struct bnx2 *bp = netdev_priv(dev);
6295 if (!netif_running(dev))
6296 return 0;
6298 pci_restore_state(pdev);
6299 bnx2_set_power_state(bp, PCI_D0);
6300 netif_device_attach(dev);
6301 bnx2_init_nic(bp);
6302 bnx2_netif_start(bp);
6303 return 0;
6304 }
6306 static struct pci_driver bnx2_pci_driver = {
6307 .name = DRV_MODULE_NAME,
6308 .id_table = bnx2_pci_tbl,
6309 .probe = bnx2_init_one,
6310 .remove = __devexit_p(bnx2_remove_one),
6311 .suspend = bnx2_suspend,
6312 .resume = bnx2_resume,
6313 };
6315 static int __init bnx2_init(void)
6316 {
6317 return pci_register_driver(&bnx2_pci_driver);
6318 }
6320 static void __exit bnx2_cleanup(void)
6321 {
6322 pci_unregister_driver(&bnx2_pci_driver);
6323 }
6325 module_init(bnx2_init);
6326 module_exit(bnx2_cleanup);