1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x8000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.6.5"
60 #define DRV_MODULE_RELDATE "September 20, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
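/* Editor's note: module parameters like this one are set at load time,
 * e.g. "modprobe bnx2 disable_msi=1" forces the driver to fall back to
 * legacy INTx interrupts on systems where MSI is broken. */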
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
235 /* The ring uses 256 indices for 255 entries; one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
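/* Editor's sketch of the arithmetic above, assuming the usual 256-slot
 * ring page (TX_DESC_CNT == 256, MAX_TX_DESC_CNT == 255): the unsigned
 * subtraction tx_prod - tx_cons yields the in-flight descriptor count
 * even after the 16-bit indices wrap; a diff of exactly TX_DESC_CNT
 * means the ring is completely full once the one skipped index is
 * accounted for, hence the clamp to MAX_TX_DESC_CNT. */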
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
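/* Editor's note: both indirect accessors use the same two-step window
 * scheme -- write the target offset to BNX2_PCICFG_REG_WINDOW_ADDRESS,
 * then move data through BNX2_PCICFG_REG_WINDOW.  indirect_lock keeps
 * concurrent callers from interleaving their address and data phases. */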
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
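/* Editor's note: on the 5709 the context write goes through a doorbell
 * (BNX2_CTX_CTX_CTRL) and the loop above polls up to five times for the
 * WRITE_REQ bit to clear; earlier chips expose context memory through a
 * plain address/data register pair instead. */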
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
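/* Editor's note: when the MAC normally auto-polls the PHY, software
 * MDIO access must first clear AUTO_POLL (the read-back flushes the
 * posted write) and restore it when done, as above; bnx2_write_phy()
 * below mirrors the same pattern.  The 50-iteration loops bound the
 * wait for START_BUSY to clear. */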
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 bnx2_disable_int(struct bnx2 *bp)
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
408 bnx2_enable_int(struct bnx2 *bp)
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
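/* Editor's note: the first INT_ACK_CMD write above acks the current
 * status index with interrupts still masked, the second unmasks, and
 * the final COAL_NOW kick asks the host coalescing block to generate an
 * interrupt immediately if events are already pending. */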
421 bnx2_disable_int_sync(struct bnx2 *bp)
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
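/* Editor's note: intr_sem acts as a disable count -- it is incremented
 * here before masking, and synchronize_irq() waits out any handler that
 * is already running; bnx2_netif_start() below only re-enables the
 * datapath once the count drops back to zero, so stop/start pairs nest
 * safely. */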
429 bnx2_netif_stop(struct bnx2 *bp)
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
440 bnx2_netif_start(struct bnx2 *bp)
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
452 bnx2_free_mem(struct bnx2 *bp)
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev,
472 sizeof(struct tx_bd) * TX_DESC_CNT,
473 bp->tx_desc_ring, bp->tx_desc_mapping);
474 bp->tx_desc_ring = NULL;
476 kfree(bp->tx_buf_ring);
477 bp->tx_buf_ring = NULL;
478 for (i = 0; i < bp->rx_max_ring; i++) {
479 if (bp->rx_desc_ring[i])
480 pci_free_consistent(bp->pdev,
481 sizeof(struct rx_bd) * RX_DESC_CNT,
483 bp->rx_desc_mapping[i]);
484 bp->rx_desc_ring[i] = NULL;
486 vfree(bp->rx_buf_ring);
487 bp->rx_buf_ring = NULL;
491 bnx2_alloc_mem(struct bnx2 *bp)
493 int i, status_blk_size;
495 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
497 if (bp->tx_buf_ring == NULL)
500 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
501 sizeof(struct tx_bd) *
503 &bp->tx_desc_mapping);
504 if (bp->tx_desc_ring == NULL)
507 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
509 if (bp->rx_buf_ring == NULL)
512 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
515 for (i = 0; i < bp->rx_max_ring; i++) {
516 bp->rx_desc_ring[i] =
517 pci_alloc_consistent(bp->pdev,
518 sizeof(struct rx_bd) * RX_DESC_CNT,
519 &bp->rx_desc_mapping[i]);
520 if (bp->rx_desc_ring[i] == NULL)
525 /* Combine status and statistics blocks into one allocation. */
526 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
527 bp->status_stats_size = status_blk_size +
528 sizeof(struct statistics_block);
530 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
531 &bp->status_blk_mapping);
532 if (bp->status_blk == NULL)
535 memset(bp->status_blk, 0, bp->status_stats_size);
537 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
540 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
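/* Editor's note: the statistics block is carved out of the same DMA
 * allocation as the status block, offset by the L1-cache-aligned status
 * size, so one coherent allocation serves both and the stats block
 * starts on its own cacheline. */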
542 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
543 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
544 if (bp->ctx_pages == 0)
546 for (i = 0; i < bp->ctx_pages; i++) {
547 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
549 &bp->ctx_blk_mapping[i]);
550 if (bp->ctx_blk[i] == NULL)
562 bnx2_report_fw_link(struct bnx2 *bp)
564 u32 fw_link_status = 0;
566 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
572 switch (bp->line_speed) {
574 if (bp->duplex == DUPLEX_HALF)
575 fw_link_status = BNX2_LINK_STATUS_10HALF;
577 fw_link_status = BNX2_LINK_STATUS_10FULL;
580 if (bp->duplex == DUPLEX_HALF)
581 fw_link_status = BNX2_LINK_STATUS_100HALF;
583 fw_link_status = BNX2_LINK_STATUS_100FULL;
586 if (bp->duplex == DUPLEX_HALF)
587 fw_link_status = BNX2_LINK_STATUS_1000HALF;
589 fw_link_status = BNX2_LINK_STATUS_1000FULL;
592 if (bp->duplex == DUPLEX_HALF)
593 fw_link_status = BNX2_LINK_STATUS_2500HALF;
595 fw_link_status = BNX2_LINK_STATUS_2500FULL;
599 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
602 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
604 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
605 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
607 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
608 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
609 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
611 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
615 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
617 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
621 bnx2_xceiver_str(struct bnx2 *bp)
623 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
624 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
629 bnx2_report_link(struct bnx2 *bp)
632 netif_carrier_on(bp->dev);
633 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
634 bnx2_xceiver_str(bp));
636 printk("%d Mbps ", bp->line_speed);
638 if (bp->duplex == DUPLEX_FULL)
639 printk("full duplex");
641 printk("half duplex");
644 if (bp->flow_ctrl & FLOW_CTRL_RX) {
645 printk(", receive ");
646 if (bp->flow_ctrl & FLOW_CTRL_TX)
647 printk("& transmit ");
650 printk(", transmit ");
652 printk("flow control ON");
657 netif_carrier_off(bp->dev);
658 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
659 bnx2_xceiver_str(bp));
662 bnx2_report_fw_link(bp);
666 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
668 u32 local_adv, remote_adv;
671 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
672 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
674 if (bp->duplex == DUPLEX_FULL) {
675 bp->flow_ctrl = bp->req_flow_ctrl;
680 if (bp->duplex != DUPLEX_FULL) {
684 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
685 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
688 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
689 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
690 bp->flow_ctrl |= FLOW_CTRL_TX;
691 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
692 bp->flow_ctrl |= FLOW_CTRL_RX;
696 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
697 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
699 if (bp->phy_flags & PHY_SERDES_FLAG) {
700 u32 new_local_adv = 0;
701 u32 new_remote_adv = 0;
703 if (local_adv & ADVERTISE_1000XPAUSE)
704 new_local_adv |= ADVERTISE_PAUSE_CAP;
705 if (local_adv & ADVERTISE_1000XPSE_ASYM)
706 new_local_adv |= ADVERTISE_PAUSE_ASYM;
707 if (remote_adv & ADVERTISE_1000XPAUSE)
708 new_remote_adv |= ADVERTISE_PAUSE_CAP;
709 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
710 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
712 local_adv = new_local_adv;
713 remote_adv = new_remote_adv;
716 /* See Table 28B-3 of 802.3ab-1999 spec. */
717 if (local_adv & ADVERTISE_PAUSE_CAP) {
718 if(local_adv & ADVERTISE_PAUSE_ASYM) {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
722 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
723 bp->flow_ctrl = FLOW_CTRL_RX;
727 if (remote_adv & ADVERTISE_PAUSE_CAP) {
728 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
732 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
733 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
734 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
736 bp->flow_ctrl = FLOW_CTRL_TX;
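/* Editor's summary of the resolution logic above (IEEE 802.3 Table
 * 28B-3), after SerDes bits are remapped to the copper encodings:
 *
 *   local PAUSE/ASYM   remote PAUSE/ASYM   result
 *        1 / x              1 / x          TX and RX pause
 *        1 / 1              0 / 1          RX pause only
 *        0 / 1              1 / 1          TX pause only
 *        anything else                     no pause
 */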
742 bnx2_5709s_linkup(struct bnx2 *bp)
748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
749 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
750 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
752 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
753 bp->line_speed = bp->req_line_speed;
754 bp->duplex = bp->req_duplex;
757 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
759 case MII_BNX2_GP_TOP_AN_SPEED_10:
760 bp->line_speed = SPEED_10;
762 case MII_BNX2_GP_TOP_AN_SPEED_100:
763 bp->line_speed = SPEED_100;
765 case MII_BNX2_GP_TOP_AN_SPEED_1G:
766 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
767 bp->line_speed = SPEED_1000;
769 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
770 bp->line_speed = SPEED_2500;
773 if (val & MII_BNX2_GP_TOP_AN_FD)
774 bp->duplex = DUPLEX_FULL;
776 bp->duplex = DUPLEX_HALF;
781 bnx2_5708s_linkup(struct bnx2 *bp)
786 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
787 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
788 case BCM5708S_1000X_STAT1_SPEED_10:
789 bp->line_speed = SPEED_10;
791 case BCM5708S_1000X_STAT1_SPEED_100:
792 bp->line_speed = SPEED_100;
794 case BCM5708S_1000X_STAT1_SPEED_1G:
795 bp->line_speed = SPEED_1000;
797 case BCM5708S_1000X_STAT1_SPEED_2G5:
798 bp->line_speed = SPEED_2500;
801 if (val & BCM5708S_1000X_STAT1_FD)
802 bp->duplex = DUPLEX_FULL;
804 bp->duplex = DUPLEX_HALF;
810 bnx2_5706s_linkup(struct bnx2 *bp)
812 u32 bmcr, local_adv, remote_adv, common;
815 bp->line_speed = SPEED_1000;
817 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
818 if (bmcr & BMCR_FULLDPLX) {
819 bp->duplex = DUPLEX_FULL;
822 bp->duplex = DUPLEX_HALF;
825 if (!(bmcr & BMCR_ANENABLE)) {
829 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
830 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
832 common = local_adv & remote_adv;
833 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
835 if (common & ADVERTISE_1000XFULL) {
836 bp->duplex = DUPLEX_FULL;
839 bp->duplex = DUPLEX_HALF;
847 bnx2_copper_linkup(struct bnx2 *bp)
851 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
852 if (bmcr & BMCR_ANENABLE) {
853 u32 local_adv, remote_adv, common;
855 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
856 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
858 common = local_adv & (remote_adv >> 2);
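/* Editor's note: the link-partner bits in MII_STAT1000 sit two bit
 * positions above the corresponding advertisement bits in MII_CTRL1000
 * (e.g. LPA_1000FULL is 0x0800 while ADVERTISE_1000FULL is 0x0200),
 * hence the >> 2 before masking. */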
859 if (common & ADVERTISE_1000FULL) {
860 bp->line_speed = SPEED_1000;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_1000HALF) {
864 bp->line_speed = SPEED_1000;
865 bp->duplex = DUPLEX_HALF;
868 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
869 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
871 common = local_adv & remote_adv;
872 if (common & ADVERTISE_100FULL) {
873 bp->line_speed = SPEED_100;
874 bp->duplex = DUPLEX_FULL;
876 else if (common & ADVERTISE_100HALF) {
877 bp->line_speed = SPEED_100;
878 bp->duplex = DUPLEX_HALF;
880 else if (common & ADVERTISE_10FULL) {
881 bp->line_speed = SPEED_10;
882 bp->duplex = DUPLEX_FULL;
884 else if (common & ADVERTISE_10HALF) {
885 bp->line_speed = SPEED_10;
886 bp->duplex = DUPLEX_HALF;
895 if (bmcr & BMCR_SPEED100) {
896 bp->line_speed = SPEED_100;
899 bp->line_speed = SPEED_10;
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
913 bnx2_set_mac_link(struct bnx2 *bp)
917 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
918 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
919 (bp->duplex == DUPLEX_HALF)) {
920 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
923 /* Configure the EMAC mode register. */
924 val = REG_RD(bp, BNX2_EMAC_MODE);
926 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
927 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
928 BNX2_EMAC_MODE_25G_MODE);
931 switch (bp->line_speed) {
933 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
934 val |= BNX2_EMAC_MODE_PORT_MII_10M;
939 val |= BNX2_EMAC_MODE_PORT_MII;
942 val |= BNX2_EMAC_MODE_25G_MODE;
945 val |= BNX2_EMAC_MODE_PORT_GMII;
950 val |= BNX2_EMAC_MODE_PORT_GMII;
953 /* Set the MAC to operate in the appropriate duplex mode. */
954 if (bp->duplex == DUPLEX_HALF)
955 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
956 REG_WR(bp, BNX2_EMAC_MODE, val);
958 /* Enable/disable rx PAUSE. */
959 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
961 if (bp->flow_ctrl & FLOW_CTRL_RX)
962 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
965 /* Enable/disable tx PAUSE. */
966 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
967 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
969 if (bp->flow_ctrl & FLOW_CTRL_TX)
970 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
971 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
973 /* Acknowledge the interrupt. */
974 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
980 bnx2_enable_bmsr1(struct bnx2 *bp)
982 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
983 (CHIP_NUM(bp) == CHIP_NUM_5709))
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
985 MII_BNX2_BLK_ADDR_GP_STATUS);
989 bnx2_disable_bmsr1(struct bnx2 *bp)
991 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
992 (CHIP_NUM(bp) == CHIP_NUM_5709))
993 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
994 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
998 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1003 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1006 if (bp->autoneg & AUTONEG_SPEED)
1007 bp->advertising |= ADVERTISED_2500baseX_Full;
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (!(up1 & BCM5708S_UP1_2G5)) {
1014 up1 |= BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1027 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1032 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1035 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1036 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1038 bnx2_read_phy(bp, bp->mii_up1, &up1);
1039 if (up1 & BCM5708S_UP1_2G5) {
1040 up1 &= ~BCM5708S_UP1_2G5;
1041 bnx2_write_phy(bp, bp->mii_up1, up1);
1045 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1046 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1047 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1053 bnx2_enable_forced_2g5(struct bnx2 *bp)
1057 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1063 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1064 MII_BNX2_BLK_ADDR_SERDES_DIG);
1065 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1066 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1067 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1068 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1070 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1071 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1072 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1074 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1075 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1076 bmcr |= BCM5708S_BMCR_FORCE_2500;
1079 if (bp->autoneg & AUTONEG_SPEED) {
1080 bmcr &= ~BMCR_ANENABLE;
1081 if (bp->req_duplex == DUPLEX_FULL)
1082 bmcr |= BMCR_FULLDPLX;
1084 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1088 bnx2_disable_forced_2g5(struct bnx2 *bp)
1092 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1099 MII_BNX2_BLK_ADDR_SERDES_DIG);
1100 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1101 val &= ~MII_BNX2_SD_MISC1_FORCE;
1102 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1104 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1105 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1106 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1108 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1113 if (bp->autoneg & AUTONEG_SPEED)
1114 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1115 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1119 bnx2_set_link(struct bnx2 *bp)
1124 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1129 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1132 link_up = bp->link_up;
1134 bnx2_enable_bmsr1(bp);
1135 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1136 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1137 bnx2_disable_bmsr1(bp);
1139 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1140 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1143 val = REG_RD(bp, BNX2_EMAC_STATUS);
1144 if (val & BNX2_EMAC_STATUS_LINK)
1145 bmsr |= BMSR_LSTATUS;
1147 bmsr &= ~BMSR_LSTATUS;
1150 if (bmsr & BMSR_LSTATUS) {
1153 if (bp->phy_flags & PHY_SERDES_FLAG) {
1154 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1155 bnx2_5706s_linkup(bp);
1156 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1157 bnx2_5708s_linkup(bp);
1158 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1159 bnx2_5709s_linkup(bp);
1162 bnx2_copper_linkup(bp);
1164 bnx2_resolve_flow_ctrl(bp);
1167 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1168 (bp->autoneg & AUTONEG_SPEED))
1169 bnx2_disable_forced_2g5(bp);
1171 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1175 if (bp->link_up != link_up) {
1176 bnx2_report_link(bp);
1179 bnx2_set_mac_link(bp);
1185 bnx2_reset_phy(struct bnx2 *bp)
1190 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1192 #define PHY_RESET_MAX_WAIT 100
1193 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1196 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1197 if (!(reg & BMCR_RESET)) {
1202 if (i == PHY_RESET_MAX_WAIT) {
1209 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1213 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1214 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPAUSE;
1220 adv = ADVERTISE_PAUSE_CAP;
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPSE_ASYM;
1228 adv = ADVERTISE_PAUSE_ASYM;
1231 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1232 if (bp->phy_flags & PHY_SERDES_FLAG) {
1233 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1236 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
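/* Editor's note: the advertisement encoding above is the standard one --
 * symmetric flow control advertises PAUSE alone, TX-only advertises
 * ASYM alone, and RX-only advertises PAUSE together with ASYM; SerDes
 * ports use the equivalent 1000BASE-X bits. */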
1242 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1245 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1247 u32 speed_arg = 0, pause_adv;
1249 pause_adv = bnx2_phy_get_pause_adv(bp);
1251 if (bp->autoneg & AUTONEG_SPEED) {
1252 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1253 if (bp->advertising & ADVERTISED_10baseT_Half)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1255 if (bp->advertising & ADVERTISED_10baseT_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1257 if (bp->advertising & ADVERTISED_100baseT_Half)
1258 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1259 if (bp->advertising & ADVERTISED_100baseT_Full)
1260 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 if (bp->advertising & ADVERTISED_1000baseT_Full)
1262 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1263 if (bp->advertising & ADVERTISED_2500baseX_Full)
1264 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1266 if (bp->req_line_speed == SPEED_2500)
1267 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1268 else if (bp->req_line_speed == SPEED_1000)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1270 else if (bp->req_line_speed == SPEED_100) {
1271 if (bp->req_duplex == DUPLEX_FULL)
1272 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1274 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1275 } else if (bp->req_line_speed == SPEED_10) {
1276 if (bp->req_duplex == DUPLEX_FULL)
1277 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1279 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1283 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1284 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1285 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1286 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1288 if (port == PORT_TP)
1289 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1290 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1292 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1294 spin_unlock_bh(&bp->phy_lock);
1295 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1296 spin_lock_bh(&bp->phy_lock);
1302 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1307 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1308 return (bnx2_setup_remote_phy(bp, port));
1310 if (!(bp->autoneg & AUTONEG_SPEED)) {
1312 int force_link_down = 0;
1314 if (bp->req_line_speed == SPEED_2500) {
1315 if (!bnx2_test_and_enable_2g5(bp))
1316 force_link_down = 1;
1317 } else if (bp->req_line_speed == SPEED_1000) {
1318 if (bnx2_test_and_disable_2g5(bp))
1319 force_link_down = 1;
1321 bnx2_read_phy(bp, bp->mii_adv, &adv);
1322 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1324 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1325 new_bmcr = bmcr & ~BMCR_ANENABLE;
1326 new_bmcr |= BMCR_SPEED1000;
1328 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1329 if (bp->req_line_speed == SPEED_2500)
1330 bnx2_enable_forced_2g5(bp);
1331 else if (bp->req_line_speed == SPEED_1000) {
1332 bnx2_disable_forced_2g5(bp);
1333 new_bmcr &= ~0x2000;
1336 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1337 if (bp->req_line_speed == SPEED_2500)
1338 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1340 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1343 if (bp->req_duplex == DUPLEX_FULL) {
1344 adv |= ADVERTISE_1000XFULL;
1345 new_bmcr |= BMCR_FULLDPLX;
1348 adv |= ADVERTISE_1000XHALF;
1349 new_bmcr &= ~BMCR_FULLDPLX;
1351 if ((new_bmcr != bmcr) || (force_link_down)) {
1352 /* Force a link down visible on the other side */
1354 bnx2_write_phy(bp, bp->mii_adv, adv &
1355 ~(ADVERTISE_1000XFULL |
1356 ADVERTISE_1000XHALF));
1357 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1358 BMCR_ANRESTART | BMCR_ANENABLE);
1361 netif_carrier_off(bp->dev);
1362 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1363 bnx2_report_link(bp);
1365 bnx2_write_phy(bp, bp->mii_adv, adv);
1366 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1368 bnx2_resolve_flow_ctrl(bp);
1369 bnx2_set_mac_link(bp);
1374 bnx2_test_and_enable_2g5(bp);
1376 if (bp->advertising & ADVERTISED_1000baseT_Full)
1377 new_adv |= ADVERTISE_1000XFULL;
1379 new_adv |= bnx2_phy_get_pause_adv(bp);
1381 bnx2_read_phy(bp, bp->mii_adv, &adv);
1382 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1384 bp->serdes_an_pending = 0;
1385 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1386 /* Force a link down visible on the other side */
1388 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1389 spin_unlock_bh(&bp->phy_lock);
1391 spin_lock_bh(&bp->phy_lock);
1394 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1395 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1397 /* Speed up link-up time when the link partner
1398 * does not autonegotiate, which is very common
1399 * in blade servers. Some blade servers use
1400 * IPMI for keyboard input and it's important
1401 * to minimize link disruptions. Autoneg. involves
1402 * exchanging base pages plus 3 next pages and
1403 * normally completes in about 120 msec.
1405 bp->current_interval = SERDES_AN_TIMEOUT;
1406 bp->serdes_an_pending = 1;
1407 mod_timer(&bp->timer, jiffies + bp->current_interval);
1409 bnx2_resolve_flow_ctrl(bp);
1410 bnx2_set_mac_link(bp);
1416 #define ETHTOOL_ALL_FIBRE_SPEED \
1417 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1418 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1419 (ADVERTISED_1000baseT_Full)
1421 #define ETHTOOL_ALL_COPPER_SPEED \
1422 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1423 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1424 ADVERTISED_1000baseT_Full)
1426 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1427 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1429 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1432 bnx2_set_default_remote_link(struct bnx2 *bp)
1436 if (bp->phy_port == PORT_TP)
1437 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1439 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1441 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1442 bp->req_line_speed = 0;
1443 bp->autoneg |= AUTONEG_SPEED;
1444 bp->advertising = ADVERTISED_Autoneg;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1446 bp->advertising |= ADVERTISED_10baseT_Half;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1448 bp->advertising |= ADVERTISED_10baseT_Full;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1450 bp->advertising |= ADVERTISED_100baseT_Half;
1451 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1452 bp->advertising |= ADVERTISED_100baseT_Full;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1454 bp->advertising |= ADVERTISED_1000baseT_Full;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1456 bp->advertising |= ADVERTISED_2500baseX_Full;
1459 bp->advertising = 0;
1460 bp->req_duplex = DUPLEX_FULL;
1461 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1462 bp->req_line_speed = SPEED_10;
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1464 bp->req_duplex = DUPLEX_HALF;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1467 bp->req_line_speed = SPEED_100;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1469 bp->req_duplex = DUPLEX_HALF;
1471 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1472 bp->req_line_speed = SPEED_1000;
1473 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1474 bp->req_line_speed = SPEED_2500;
1479 bnx2_set_default_link(struct bnx2 *bp)
1481 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1482 return bnx2_set_default_remote_link(bp);
1484 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1485 bp->req_line_speed = 0;
1486 if (bp->phy_flags & PHY_SERDES_FLAG) {
1489 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1491 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1492 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1493 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1495 bp->req_line_speed = bp->line_speed = SPEED_1000;
1496 bp->req_duplex = DUPLEX_FULL;
1499 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1503 bnx2_send_heart_beat(struct bnx2 *bp)
1508 spin_lock(&bp->indirect_lock);
1509 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1510 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1511 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1512 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1513 spin_unlock(&bp->indirect_lock);
1517 bnx2_remote_phy_event(struct bnx2 *bp)
1520 u8 link_up = bp->link_up;
1523 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1525 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1526 bnx2_send_heart_beat(bp);
1528 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1530 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1536 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1537 bp->duplex = DUPLEX_FULL;
1539 case BNX2_LINK_STATUS_10HALF:
1540 bp->duplex = DUPLEX_HALF;
1541 case BNX2_LINK_STATUS_10FULL:
1542 bp->line_speed = SPEED_10;
1544 case BNX2_LINK_STATUS_100HALF:
1545 bp->duplex = DUPLEX_HALF;
1546 case BNX2_LINK_STATUS_100BASE_T4:
1547 case BNX2_LINK_STATUS_100FULL:
1548 bp->line_speed = SPEED_100;
1550 case BNX2_LINK_STATUS_1000HALF:
1551 bp->duplex = DUPLEX_HALF;
1552 case BNX2_LINK_STATUS_1000FULL:
1553 bp->line_speed = SPEED_1000;
1555 case BNX2_LINK_STATUS_2500HALF:
1556 bp->duplex = DUPLEX_HALF;
1557 case BNX2_LINK_STATUS_2500FULL:
1558 bp->line_speed = SPEED_2500;
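/* Editor's note: the switch above relies on fall-through -- each *HALF
 * case sets DUPLEX_HALF and then falls into the matching *FULL case,
 * which sets the common line speed. */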
1565 spin_lock(&bp->phy_lock);
1567 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1568 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1569 if (bp->duplex == DUPLEX_FULL)
1570 bp->flow_ctrl = bp->req_flow_ctrl;
1572 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1573 bp->flow_ctrl |= FLOW_CTRL_TX;
1574 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1575 bp->flow_ctrl |= FLOW_CTRL_RX;
1578 old_port = bp->phy_port;
1579 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1580 bp->phy_port = PORT_FIBRE;
1582 bp->phy_port = PORT_TP;
1584 if (old_port != bp->phy_port)
1585 bnx2_set_default_link(bp);
1587 spin_unlock(&bp->phy_lock);
1589 if (bp->link_up != link_up)
1590 bnx2_report_link(bp);
1592 bnx2_set_mac_link(bp);
1596 bnx2_set_remote_link(struct bnx2 *bp)
1600 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1602 case BNX2_FW_EVT_CODE_LINK_EVENT:
1603 bnx2_remote_phy_event(bp);
1605 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1607 bnx2_send_heart_beat(bp);
1614 bnx2_setup_copper_phy(struct bnx2 *bp)
1619 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1621 if (bp->autoneg & AUTONEG_SPEED) {
1622 u32 adv_reg, adv1000_reg;
1623 u32 new_adv_reg = 0;
1624 u32 new_adv1000_reg = 0;
1626 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1627 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1628 ADVERTISE_PAUSE_ASYM);
1630 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1631 adv1000_reg &= PHY_ALL_1000_SPEED;
1633 if (bp->advertising & ADVERTISED_10baseT_Half)
1634 new_adv_reg |= ADVERTISE_10HALF;
1635 if (bp->advertising & ADVERTISED_10baseT_Full)
1636 new_adv_reg |= ADVERTISE_10FULL;
1637 if (bp->advertising & ADVERTISED_100baseT_Half)
1638 new_adv_reg |= ADVERTISE_100HALF;
1639 if (bp->advertising & ADVERTISED_100baseT_Full)
1640 new_adv_reg |= ADVERTISE_100FULL;
1641 if (bp->advertising & ADVERTISED_1000baseT_Full)
1642 new_adv1000_reg |= ADVERTISE_1000FULL;
1644 new_adv_reg |= ADVERTISE_CSMA;
1646 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1648 if ((adv1000_reg != new_adv1000_reg) ||
1649 (adv_reg != new_adv_reg) ||
1650 ((bmcr & BMCR_ANENABLE) == 0)) {
1652 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1653 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1654 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1657 else if (bp->link_up) {
1658 /* Flow ctrl may have changed from auto to forced */
1659 /* or vice-versa. */
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1668 if (bp->req_line_speed == SPEED_100) {
1669 new_bmcr |= BMCR_SPEED100;
1671 if (bp->req_duplex == DUPLEX_FULL) {
1672 new_bmcr |= BMCR_FULLDPLX;
1674 if (new_bmcr != bmcr) {
1677 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 if (bmsr & BMSR_LSTATUS) {
1681 /* Force link down */
1682 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1683 spin_unlock_bh(&bp->phy_lock);
1685 spin_lock_bh(&bp->phy_lock);
1687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1691 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1693 /* Normally, the new speed is setup after the link has
1694 * gone down and up again. In some cases, link will not go
1695 * down so we need to set up the new speed here.
1697 if (bmsr & BMSR_LSTATUS) {
1698 bp->line_speed = bp->req_line_speed;
1699 bp->duplex = bp->req_duplex;
1700 bnx2_resolve_flow_ctrl(bp);
1701 bnx2_set_mac_link(bp);
1704 bnx2_resolve_flow_ctrl(bp);
1705 bnx2_set_mac_link(bp);
1711 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1713 if (bp->loopback == MAC_LOOPBACK)
1716 if (bp->phy_flags & PHY_SERDES_FLAG) {
1717 return (bnx2_setup_serdes_phy(bp, port));
1720 return (bnx2_setup_copper_phy(bp));
1725 bnx2_init_5709s_phy(struct bnx2 *bp)
1729 bp->mii_bmcr = MII_BMCR + 0x10;
1730 bp->mii_bmsr = MII_BMSR + 0x10;
1731 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1732 bp->mii_adv = MII_ADVERTISE + 0x10;
1733 bp->mii_lpa = MII_LPA + 0x10;
1734 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1737 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1739 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1744 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1745 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1746 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1747 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1750 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1751 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1752 val |= BCM5708S_UP1_2G5;
1754 val &= ~BCM5708S_UP1_2G5;
1755 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1758 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1759 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1760 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1762 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1764 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1765 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1766 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1768 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1774 bnx2_init_5708s_phy(struct bnx2 *bp)
1780 bp->mii_up1 = BCM5708S_UP1;
1782 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1783 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1784 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1786 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1787 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1788 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1790 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1791 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1792 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1794 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1795 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1796 val |= BCM5708S_UP1_2G5;
1797 bnx2_write_phy(bp, BCM5708S_UP1, val);
1800 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1801 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1802 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1803 /* increase tx signal amplitude */
1804 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1805 BCM5708S_BLK_ADDR_TX_MISC);
1806 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1807 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1808 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1809 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1812 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1813 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1818 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1819 BNX2_SHARED_HW_CFG_CONFIG);
1820 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1822 BCM5708S_BLK_ADDR_TX_MISC);
1823 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1824 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1825 BCM5708S_BLK_ADDR_DIG);
1832 bnx2_init_5706s_phy(struct bnx2 *bp)
1836 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1838 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1839 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1841 if (bp->dev->mtu > 1500) {
1844 /* Set extended packet length bit */
1845 bnx2_write_phy(bp, 0x18, 0x7);
1846 bnx2_read_phy(bp, 0x18, &val);
1847 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1849 bnx2_write_phy(bp, 0x1c, 0x6c00);
1850 bnx2_read_phy(bp, 0x1c, &val);
1851 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1856 bnx2_write_phy(bp, 0x18, 0x7);
1857 bnx2_read_phy(bp, 0x18, &val);
1858 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1860 bnx2_write_phy(bp, 0x1c, 0x6c00);
1861 bnx2_read_phy(bp, 0x1c, &val);
1862 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1869 bnx2_init_copper_phy(struct bnx2 *bp)
1875 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1876 bnx2_write_phy(bp, 0x18, 0x0c00);
1877 bnx2_write_phy(bp, 0x17, 0x000a);
1878 bnx2_write_phy(bp, 0x15, 0x310b);
1879 bnx2_write_phy(bp, 0x17, 0x201f);
1880 bnx2_write_phy(bp, 0x15, 0x9506);
1881 bnx2_write_phy(bp, 0x17, 0x401f);
1882 bnx2_write_phy(bp, 0x15, 0x14e2);
1883 bnx2_write_phy(bp, 0x18, 0x0400);
1886 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1887 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1888 MII_BNX2_DSP_EXPAND_REG | 0x8);
1889 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1891 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1894 if (bp->dev->mtu > 1500) {
1895 /* Set extended packet length bit */
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val | 0x4000);
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val | 0x1);
1904 bnx2_write_phy(bp, 0x18, 0x7);
1905 bnx2_read_phy(bp, 0x18, &val);
1906 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1908 bnx2_read_phy(bp, 0x10, &val);
1909 bnx2_write_phy(bp, 0x10, val & ~0x1);
1912 /* ethernet@wirespeed */
1913 bnx2_write_phy(bp, 0x18, 0x7007);
1914 bnx2_read_phy(bp, 0x18, &val);
1915 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1921 bnx2_init_phy(struct bnx2 *bp)
1926 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1927 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1929 bp->mii_bmcr = MII_BMCR;
1930 bp->mii_bmsr = MII_BMSR;
1931 bp->mii_bmsr1 = MII_BMSR;
1932 bp->mii_adv = MII_ADVERTISE;
1933 bp->mii_lpa = MII_LPA;
1935 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1940 bnx2_read_phy(bp, MII_PHYSID1, &val);
1941 bp->phy_id = val << 16;
1942 bnx2_read_phy(bp, MII_PHYSID2, &val);
1943 bp->phy_id |= val & 0xffff;
1945 if (bp->phy_flags & PHY_SERDES_FLAG) {
1946 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1947 rc = bnx2_init_5706s_phy(bp);
1948 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1949 rc = bnx2_init_5708s_phy(bp);
1950 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1951 rc = bnx2_init_5709s_phy(bp);
1954 rc = bnx2_init_copper_phy(bp);
1959 rc = bnx2_setup_phy(bp, bp->phy_port);
1965 bnx2_set_mac_loopback(struct bnx2 *bp)
1969 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1970 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1971 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1972 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1977 static int bnx2_test_link(struct bnx2 *);
1980 bnx2_set_phy_loopback(struct bnx2 *bp)
1985 spin_lock_bh(&bp->phy_lock);
1986 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1988 spin_unlock_bh(&bp->phy_lock);
1992 for (i = 0; i < 10; i++) {
1993 if (bnx2_test_link(bp) == 0)
1998 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1999 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2000 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2001 BNX2_EMAC_MODE_25G_MODE);
2003 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2004 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2016 msg_data |= bp->fw_wr_seq;
2018 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2020 /* wait for an acknowledgement. */
2021 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2026 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2029 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2032 /* If we timed out, inform the firmware that this is the case. */
2033 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2035 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2038 msg_data &= ~BNX2_DRV_MSG_CODE;
2039 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2041 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2046 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
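/* Editor's note: the driver/firmware handshake above is sequence
 * based -- each request is stamped with fw_wr_seq and the loop polls
 * the firmware mailbox until the ack echoes that sequence, writing a
 * FW_TIMEOUT code back if it never arrives. */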
2053 bnx2_init_5709_context(struct bnx2 *bp)
2058 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2059 val |= (BCM_PAGE_BITS - 8) << 16;
2060 REG_WR(bp, BNX2_CTX_COMMAND, val);
2061 for (i = 0; i < 10; i++) {
2062 val = REG_RD(bp, BNX2_CTX_COMMAND);
2063 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2067 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2070 for (i = 0; i < bp->ctx_pages; i++) {
2073 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2074 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2075 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2077 (u64) bp->ctx_blk_mapping[i] >> 32);
2078 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2079 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2080 for (j = 0; j < 10; j++) {
2082 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2083 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2087 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2096 bnx2_init_context(struct bnx2 *bp)
2102 u32 vcid_addr, pcid_addr, offset;
2107 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2110 vcid_addr = GET_PCID_ADDR(vcid);
2112 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2117 pcid_addr = GET_PCID_ADDR(new_vcid);
2120 vcid_addr = GET_CID_ADDR(vcid);
2121 pcid_addr = vcid_addr;
2124 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2125 vcid_addr += (i << PHY_CTX_SHIFT);
2126 pcid_addr += (i << PHY_CTX_SHIFT);
2128 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2129 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2131 /* Zero out the context. */
2132 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2133 CTX_WR(bp, 0x00, offset, 0);
2135 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2136 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2142 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2148 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2149 if (good_mbuf == NULL) {
2150 printk(KERN_ERR PFX "Failed to allocate memory in "
2151 "bnx2_alloc_bad_rbuf\n");
2155 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2156 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2160 /* Allocate a bunch of mbufs and save the good ones in an array. */
2161 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2162 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2163 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2165 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2167 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2169 /* The addresses with Bit 9 set are bad memory blocks. */
2170 if (!(val & (1 << 9))) {
2171 good_mbuf[good_mbuf_cnt] = (u16) val;
2175 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2178 /* Free the good ones back to the mbuf pool, thus discarding
2179 * all the bad ones. */
2180 while (good_mbuf_cnt) {
2183 val = good_mbuf[good_mbuf_cnt];
2184 val = (val << 9) | val | 1;
2186 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
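/* Editor's note (hedged): the value written above appears to pack the
 * mbuf index into both halves of the free command with bit 0 as the
 * request flag; the net effect is that every good buffer returns to the
 * pool while the bad ones (bit 9 set) stay allocated and out of use. */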
2193 bnx2_set_mac_addr(struct bnx2 *bp)
2196 u8 *mac_addr = bp->dev->dev_addr;
2198 val = (mac_addr[0] << 8) | mac_addr[1];
2200 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2203 (mac_addr[4] << 8) | mac_addr[5];
2205 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2209 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2211 struct sk_buff *skb;
2212 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2214 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2215 unsigned long align;
2217 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2222 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2223 skb_reserve(skb, BNX2_RX_ALIGN - align);
2225 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2226 PCI_DMA_FROMDEVICE);
2229 pci_unmap_addr_set(rx_buf, mapping, mapping);
2231 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2232 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2234 bp->rx_prod_bseq += bp->rx_buf_use_size;
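/* Editor's note: rx_prod_bseq is a running byte count of posted rx
 * buffer space; bnx2_rx_int() later writes it to the BSEQ mailbox so
 * the chip knows how much new buffer space the host has provided. */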
2240 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2242 struct status_block *sblk = bp->status_blk;
2243 u32 new_link_state, old_link_state;
2246 new_link_state = sblk->status_attn_bits & event;
2247 old_link_state = sblk->status_attn_bits_ack & event;
2248 if (new_link_state != old_link_state) {
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2252 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2260 bnx2_phy_int(struct bnx2 *bp)
2262 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2263 spin_lock(&bp->phy_lock);
2265 spin_unlock(&bp->phy_lock);
2267 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2268 bnx2_set_remote_link(bp);
2273 bnx2_tx_int(struct bnx2 *bp)
2275 struct status_block *sblk = bp->status_blk;
2276 u16 hw_cons, sw_cons, sw_ring_cons;
2279 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2280 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2283 sw_cons = bp->tx_cons;
2285 while (sw_cons != hw_cons) {
2286 struct sw_bd *tx_buf;
2287 struct sk_buff *skb;
2290 sw_ring_cons = TX_RING_IDX(sw_cons);
2292 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2295 /* partial BD completions possible with TSO packets */
2296 if (skb_is_gso(skb)) {
2297 u16 last_idx, last_ring_idx;
2299 last_idx = sw_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 last_ring_idx = sw_ring_cons +
2302 skb_shinfo(skb)->nr_frags + 1;
2303 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2306 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2311 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2312 skb_headlen(skb), PCI_DMA_TODEVICE);
2315 last = skb_shinfo(skb)->nr_frags;
2317 for (i = 0; i < last; i++) {
2318 sw_cons = NEXT_TX_BD(sw_cons);
2320 pci_unmap_page(bp->pdev,
2322 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2324 skb_shinfo(skb)->frags[i].size,
2328 sw_cons = NEXT_TX_BD(sw_cons);
2330 tx_free_bd += last + 1;
2334 hw_cons = bp->hw_tx_cons =
2335 sblk->status_tx_quick_consumer_index0;
2337 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2342 bp->tx_cons = sw_cons;
2343 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2344 * before checking for netif_queue_stopped(). Without the
2345 * memory barrier, there is a small possibility that bnx2_start_xmit()
2346 * will miss it and cause the queue to be stopped forever.
2350 if (unlikely(netif_queue_stopped(bp->dev)) &&
2351 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2352 netif_tx_lock(bp->dev);
2353 if ((netif_queue_stopped(bp->dev)) &&
2354 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2355 netif_wake_queue(bp->dev);
2356 netif_tx_unlock(bp->dev);
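/* Editor's note: the stopped/avail test is deliberately repeated under
 * netif_tx_lock so a concurrent bnx2_start_xmit() cannot stop the queue
 * between the unlocked check and the wake. */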
2361 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2364 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2365 struct rx_bd *cons_bd, *prod_bd;
2367 cons_rx_buf = &bp->rx_buf_ring[cons];
2368 prod_rx_buf = &bp->rx_buf_ring[prod];
2370 pci_dma_sync_single_for_device(bp->pdev,
2371 pci_unmap_addr(cons_rx_buf, mapping),
2372 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2374 bp->rx_prod_bseq += bp->rx_buf_use_size;
2376 prod_rx_buf->skb = skb;
2381 pci_unmap_addr_set(prod_rx_buf, mapping,
2382 pci_unmap_addr(cons_rx_buf, mapping));
2384 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2385 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2386 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2387 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
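/* Editor's note: "reuse" moves the still-mapped buffer from the
 * consumer slot back into the producer slot -- skb pointer, unmap
 * address and BD address are all copied -- so dropped or copied packets
 * cost no new allocation or DMA mapping. */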
2391 bnx2_rx_int(struct bnx2 *bp, int budget)
2393 struct status_block *sblk = bp->status_blk;
2394 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2395 struct l2_fhdr *rx_hdr;
2398 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2399 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2402 sw_cons = bp->rx_cons;
2403 sw_prod = bp->rx_prod;
2405 /* Memory barrier necessary as speculative reads of the rx
2406 * buffer can be ahead of the index in the status block
2409 while (sw_cons != hw_cons) {
2412 struct sw_bd *rx_buf;
2413 struct sk_buff *skb;
2414 dma_addr_t dma_addr;
2416 sw_ring_cons = RX_RING_IDX(sw_cons);
2417 sw_ring_prod = RX_RING_IDX(sw_prod);
2419 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2424 dma_addr = pci_unmap_addr(rx_buf, mapping);
2426 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2427 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2429 rx_hdr = (struct l2_fhdr *) skb->data;
2430 len = rx_hdr->l2_fhdr_pkt_len - 4;
2432 if ((status = rx_hdr->l2_fhdr_status) &
2433 (L2_FHDR_ERRORS_BAD_CRC |
2434 L2_FHDR_ERRORS_PHY_DECODE |
2435 L2_FHDR_ERRORS_ALIGNMENT |
2436 L2_FHDR_ERRORS_TOO_SHORT |
2437 L2_FHDR_ERRORS_GIANT_FRAME)) {
2439 goto reuse_rx;
2440 }
2442 /* Since we don't have a jumbo ring, copy small packets
2443 * if mtu > 1500.
2444 */
2445 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2446 struct sk_buff *new_skb;
2448 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2449 if (new_skb == NULL)
2450 goto reuse_rx;
2453 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2454 new_skb->data, len + 2);
2455 skb_reserve(new_skb, 2);
2456 skb_put(new_skb, len);
2458 bnx2_reuse_rx_skb(bp, skb,
2459 sw_ring_cons, sw_ring_prod);
2463 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2464 pci_unmap_single(bp->pdev, dma_addr,
2465 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2467 skb_reserve(skb, bp->rx_offset);
2468 skb_put(skb, len);
2469 }
2470 else {
2471 reuse_rx:
2472 bnx2_reuse_rx_skb(bp, skb,
2473 sw_ring_cons, sw_ring_prod);
2474 goto next_rx;
2475 }
2477 skb->protocol = eth_type_trans(skb, bp->dev);
2479 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2480 (ntohs(skb->protocol) != 0x8100)) {
2482 dev_kfree_skb(skb);
2483 goto next_rx;
2485 }
2487 skb->ip_summed = CHECKSUM_NONE;
2488 if (bp->rx_csum &&
2489 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2490 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2492 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2493 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2494 skb->ip_summed = CHECKSUM_UNNECESSARY;
2495 }
2498 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2499 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2500 rx_hdr->l2_fhdr_vlan_tag);
2501 }
2502 else
2504 netif_receive_skb(skb);
2506 bp->dev->last_rx = jiffies;
2507 rx_pkt++;
2509 next_rx:
2510 sw_cons = NEXT_RX_BD(sw_cons);
2511 sw_prod = NEXT_RX_BD(sw_prod);
2513 if (rx_pkt == budget)
2514 break;
2516 /* Refresh hw_cons to see if there is new work */
2517 if (sw_cons == hw_cons) {
2518 hw_cons = bp->hw_rx_cons =
2519 sblk->status_rx_quick_consumer_index0;
2520 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2521 hw_cons++;
2522 rmb();
2523 }
2524 }
2525 bp->rx_cons = sw_cons;
2526 bp->rx_prod = sw_prod;
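/* Ring the rx doorbells: publish the new host producer index and the
 * running producer byte sequence to the chip's L2 context mailbox. */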
2528 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2530 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2538 /* MSI ISR - The only difference between this and the INTx ISR
2539 * is that the MSI interrupt is always serviced.
2540 */
2541 static irqreturn_t
2542 bnx2_msi(int irq, void *dev_instance)
2544 struct net_device *dev = dev_instance;
2545 struct bnx2 *bp = netdev_priv(dev);
2547 prefetch(bp->status_blk);
2548 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2549 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2550 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2552 /* Return here if interrupt is disabled. */
2553 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2554 return IRQ_HANDLED;
2556 netif_rx_schedule(dev, &bp->napi);
2558 return IRQ_HANDLED;
2559 }
2561 static irqreturn_t
2562 bnx2_msi_1shot(int irq, void *dev_instance)
2564 struct net_device *dev = dev_instance;
2565 struct bnx2 *bp = netdev_priv(dev);
2567 prefetch(bp->status_blk);
2569 /* Return here if interrupt is disabled. */
2570 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2571 return IRQ_HANDLED;
2573 netif_rx_schedule(dev, &bp->napi);
2575 return IRQ_HANDLED;
2576 }
2578 static irqreturn_t
2579 bnx2_interrupt(int irq, void *dev_instance)
2581 struct net_device *dev = dev_instance;
2582 struct bnx2 *bp = netdev_priv(dev);
2583 struct status_block *sblk = bp->status_blk;
2585 /* When using INTx, it is possible for the interrupt to arrive
2586 * at the CPU before the status block posted prior to the
2587 * interrupt. Reading a register will flush the status block.
2588 * When using MSI, the MSI message will always complete after
2589 * the status block write.
2591 if ((sblk->status_idx == bp->last_status_idx) &&
2592 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2593 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2594 return IRQ_NONE;
2596 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2597 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2598 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2600 /* Read back to deassert IRQ immediately to avoid too many
2601 * spurious interrupts.
2603 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2605 /* Return here if interrupt is shared and is disabled. */
2606 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2607 return IRQ_HANDLED;
2609 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2610 bp->last_status_idx = sblk->status_idx;
2611 __netif_rx_schedule(dev, &bp->napi);
2612 }
2614 return IRQ_HANDLED;
2615 }
2617 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2618 STATUS_ATTN_BITS_TIMER_ABORT)
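/* New work exists when either quick consumer index in the status block
 * has moved past the driver's cached copy, or an attention event (link
 * change / timer abort) is pending and not yet acknowledged. */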
2620 static int
2621 bnx2_has_work(struct bnx2 *bp)
2623 struct status_block *sblk = bp->status_blk;
2625 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2626 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2627 return 1;
2629 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2630 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2631 return 1;
2633 return 0;
2634 }
2636 static int
2637 bnx2_poll(struct napi_struct *napi, int budget)
2639 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2640 struct net_device *dev = bp->dev;
2641 struct status_block *sblk = bp->status_blk;
2642 u32 status_attn_bits = sblk->status_attn_bits;
2643 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2644 int work_done = 0;
2646 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2647 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2648 bnx2_phy_int(bp);
2651 /* This is needed to take care of transient status
2652 * during link changes.
2654 REG_WR(bp, BNX2_HC_COMMAND,
2655 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2656 REG_RD(bp, BNX2_HC_COMMAND);
2657 }
2659 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2660 bnx2_tx_int(bp);
2662 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
2663 work_done = bnx2_rx_int(bp, budget);
2665 bp->last_status_idx = bp->status_blk->status_idx;
2668 if (!bnx2_has_work(bp)) {
2669 netif_rx_complete(dev, napi);
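/* Writing the latest status index to INT_ACK_CMD re-arms the
 * interrupt.  With MSI a single ack suffices; for INTx the ack is
 * written twice, first with MASK_INT still set and then without, so
 * the line is deasserted cleanly before being re-enabled. */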
2670 if (likely(bp->flags & USING_MSI_FLAG)) {
2671 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2672 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2673 bp->last_status_idx);
2676 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2677 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2678 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2679 bp->last_status_idx);
2681 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2682 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2683 bp->last_status_idx);
2684 }
2686 return work_done;
2687 }
2689 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2690 * from set_multicast.
2693 bnx2_set_rx_mode(struct net_device *dev)
2695 struct bnx2 *bp = netdev_priv(dev);
2696 u32 rx_mode, sort_mode;
2697 int i;
2699 spin_lock_bh(&bp->phy_lock);
2701 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2702 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2703 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2705 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2706 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2708 if (!(bp->flags & ASF_ENABLE_FLAG))
2709 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2711 if (dev->flags & IFF_PROMISC) {
2712 /* Promiscuous mode. */
2713 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2714 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2715 BNX2_RPM_SORT_USER0_PROM_VLAN;
2716 }
2717 else if (dev->flags & IFF_ALLMULTI) {
2718 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2719 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2720 0xffffffff);
2721 }
2722 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2723 }
2724 else {
2725 /* Accept one or more multicast(s). */
2726 struct dev_mc_list *mclist;
2727 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2732 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2734 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2735 i++, mclist = mclist->next) {
2737 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2738 bit = crc & 0xff;
2739 regidx = (bit & 0xe0) >> 5;
2740 bit &= 0x1f;
2741 mc_filter[regidx] |= (1 << bit);
2744 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2745 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2746 mc_filter[i]);
2747 }
2749 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2752 if (rx_mode != bp->rx_mode) {
2753 bp->rx_mode = rx_mode;
2754 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2755 }
2757 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2758 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2759 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2761 spin_unlock_bh(&bp->phy_lock);
2762 }
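/* Illustrative sketch, not called by the driver: how one multicast MAC
 * maps into the 8 x 32-bit EMAC hash registers in the loop above.  The
 * low byte of the little-endian CRC picks a register (top 3 bits) and
 * a bit within it (low 5 bits). */
static inline void bnx2_example_mc_hash(const u8 *addr, u32 *regidx, u32 *bit)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);
	u32 b = crc & 0xff;

	*regidx = (b & 0xe0) >> 5;	/* BNX2_EMAC_MULTICAST_HASH0 + *regidx * 4 */
	*bit = b & 0x1f;		/* bit index within that register */
}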
2765 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2766 u32 rv2p_proc)
2767 {
2768 int i;
2769 u32 val;
2772 for (i = 0; i < rv2p_code_len; i += 8) {
2773 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2774 rv2p_code++;
2775 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2776 rv2p_code++;
2778 if (rv2p_proc == RV2P_PROC1) {
2779 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2780 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2781 }
2782 else {
2783 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2784 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2788 /* Reset the processor, un-stall is done later. */
2789 if (rv2p_proc == RV2P_PROC1) {
2790 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2791 }
2792 else {
2793 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2794 }
2795 }
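/* Descriptive note: load_cpu_fw() below halts the on-chip RISC
 * processor, copies each firmware section (text, data, sbss, bss,
 * rodata) into its scratchpad through indirect register writes, clears
 * the prefetch instruction, points the PC at the entry address, and
 * finally unhalts the CPU. */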
2798 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2805 val = REG_RD_IND(bp, cpu_reg->mode);
2806 val |= cpu_reg->mode_value_halt;
2807 REG_WR_IND(bp, cpu_reg->mode, val);
2808 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2810 /* Load the Text area. */
2811 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2816 text = vmalloc(FW_BUF_SIZE);
2819 rc = zlib_inflate_blob(text, FW_BUF_SIZE, fw->gz_text, fw->gz_text_len);
2824 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2825 REG_WR_IND(bp, offset, cpu_to_le32(text[j]));
2830 /* Load the Data area. */
2831 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2835 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2836 REG_WR_IND(bp, offset, fw->data[j]);
2840 /* Load the SBSS area. */
2841 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2845 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2846 REG_WR_IND(bp, offset, fw->sbss[j]);
2850 /* Load the BSS area. */
2851 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2855 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2856 REG_WR_IND(bp, offset, fw->bss[j]);
2860 /* Load the Read-Only area. */
2861 offset = cpu_reg->spad_base +
2862 (fw->rodata_addr - cpu_reg->mips_view_base);
2866 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2867 REG_WR_IND(bp, offset, fw->rodata[j]);
2871 /* Clear the pre-fetch instruction. */
2872 REG_WR_IND(bp, cpu_reg->inst, 0);
2873 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2875 /* Start the CPU. */
2876 val = REG_RD_IND(bp, cpu_reg->mode);
2877 val &= ~cpu_reg->mode_value_halt;
2878 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2879 REG_WR_IND(bp, cpu_reg->mode, val);
2885 bnx2_init_cpus(struct bnx2 *bp)
2887 struct cpu_reg cpu_reg;
2888 struct fw_info *fw;
2889 int rc;
2890 void *text;
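/* The same cpu_reg template is refilled for each of the five on-chip
 * processors (RXP, TXP, TPAT, COM, CP); only the register block base
 * addresses and the firmware image differ between them. */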
2892 /* Initialize the RV2P processor. */
2893 text = vmalloc(FW_BUF_SIZE);
2896 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2901 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2903 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2908 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
2911 /* Initialize the RX Processor. */
2912 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2913 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2914 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2915 cpu_reg.state = BNX2_RXP_CPU_STATE;
2916 cpu_reg.state_value_clear = 0xffffff;
2917 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2918 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2919 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2920 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2921 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2922 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2923 cpu_reg.mips_view_base = 0x8000000;
2925 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2926 fw = &bnx2_rxp_fw_09;
2927 else
2928 fw = &bnx2_rxp_fw_06;
2930 rc = load_cpu_fw(bp, &cpu_reg, fw);
2931 if (rc)
2932 goto init_cpu_err;
2934 /* Initialize the TX Processor. */
2935 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2936 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2937 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2938 cpu_reg.state = BNX2_TXP_CPU_STATE;
2939 cpu_reg.state_value_clear = 0xffffff;
2940 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2941 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2942 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2943 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2944 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2945 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2946 cpu_reg.mips_view_base = 0x8000000;
2948 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2949 fw = &bnx2_txp_fw_09;
2950 else
2951 fw = &bnx2_txp_fw_06;
2953 rc = load_cpu_fw(bp, &cpu_reg, fw);
2954 if (rc)
2955 goto init_cpu_err;
2957 /* Initialize the TX Patch-up Processor. */
2958 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2959 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2960 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2961 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2962 cpu_reg.state_value_clear = 0xffffff;
2963 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2964 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2965 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2966 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2967 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2968 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2969 cpu_reg.mips_view_base = 0x8000000;
2971 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2972 fw = &bnx2_tpat_fw_09;
2973 else
2974 fw = &bnx2_tpat_fw_06;
2976 rc = load_cpu_fw(bp, &cpu_reg, fw);
2977 if (rc)
2978 goto init_cpu_err;
2980 /* Initialize the Completion Processor. */
2981 cpu_reg.mode = BNX2_COM_CPU_MODE;
2982 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2983 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2984 cpu_reg.state = BNX2_COM_CPU_STATE;
2985 cpu_reg.state_value_clear = 0xffffff;
2986 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2987 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2988 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2989 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2990 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2991 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2992 cpu_reg.mips_view_base = 0x8000000;
2994 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2995 fw = &bnx2_com_fw_09;
2996 else
2997 fw = &bnx2_com_fw_06;
2999 rc = load_cpu_fw(bp, &cpu_reg, fw);
3000 if (rc)
3001 goto init_cpu_err;
3003 /* Initialize the Command Processor. */
3004 cpu_reg.mode = BNX2_CP_CPU_MODE;
3005 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3006 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3007 cpu_reg.state = BNX2_CP_CPU_STATE;
3008 cpu_reg.state_value_clear = 0xffffff;
3009 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3010 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3011 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3012 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3013 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3014 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3015 cpu_reg.mips_view_base = 0x8000000;
3017 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3018 fw = &bnx2_cp_fw_09;
3020 rc = load_cpu_fw(bp, &cpu_reg, fw);
3021 if (rc)
3022 goto init_cpu_err;
3023 }
3025 init_cpu_err:
3026 vfree(text);
3027 return rc;
3028 }
3029 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3033 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3039 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3040 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3041 PCI_PM_CTRL_PME_STATUS);
3043 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3044 /* delay required during transition out of D3hot */
3045 msleep(20);
3047 val = REG_RD(bp, BNX2_EMAC_MODE);
3048 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3049 val &= ~BNX2_EMAC_MODE_MPKT;
3050 REG_WR(bp, BNX2_EMAC_MODE, val);
3052 val = REG_RD(bp, BNX2_RPM_CONFIG);
3053 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3054 REG_WR(bp, BNX2_RPM_CONFIG, val);
3065 autoneg = bp->autoneg;
3066 advertising = bp->advertising;
3068 bp->autoneg = AUTONEG_SPEED;
3069 bp->advertising = ADVERTISED_10baseT_Half |
3070 ADVERTISED_10baseT_Full |
3071 ADVERTISED_100baseT_Half |
3072 ADVERTISED_100baseT_Full |
3073 ADVERTISED_Autoneg;
3075 bnx2_setup_copper_phy(bp);
3077 bp->autoneg = autoneg;
3078 bp->advertising = advertising;
3080 bnx2_set_mac_addr(bp);
3082 val = REG_RD(bp, BNX2_EMAC_MODE);
3084 /* Enable port mode. */
3085 val &= ~BNX2_EMAC_MODE_PORT;
3086 val |= BNX2_EMAC_MODE_PORT_MII |
3087 BNX2_EMAC_MODE_MPKT_RCVD |
3088 BNX2_EMAC_MODE_ACPI_RCVD |
3089 BNX2_EMAC_MODE_MPKT;
3091 REG_WR(bp, BNX2_EMAC_MODE, val);
3093 /* receive all multicast */
3094 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3095 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3098 REG_WR(bp, BNX2_EMAC_RX_MODE,
3099 BNX2_EMAC_RX_MODE_SORT_MODE);
3101 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3102 BNX2_RPM_SORT_USER0_MC_EN;
3103 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3104 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3105 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3106 BNX2_RPM_SORT_USER0_ENA);
3108 /* Need to enable EMAC and RPM for WOL. */
3109 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3110 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3111 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3112 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3114 val = REG_RD(bp, BNX2_RPM_CONFIG);
3115 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3116 REG_WR(bp, BNX2_RPM_CONFIG, val);
3118 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3119 }
3120 else {
3121 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3124 if (!(bp->flags & NO_WOL_FLAG))
3125 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3127 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3128 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3129 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3138 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3140 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3143 /* No more memory access after this point until
3144 * device is brought back to D0.
3156 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3161 /* Request access to the flash interface. */
3162 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3163 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3164 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3165 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3171 if (j >= NVRAM_TIMEOUT_COUNT)
3172 return -EBUSY;
3174 return 0;
3175 }
3178 bnx2_release_nvram_lock(struct bnx2 *bp)
3183 /* Relinquish nvram interface. */
3184 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3186 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3187 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3188 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3194 if (j >= NVRAM_TIMEOUT_COUNT)
3195 return -EBUSY;
3197 return 0;
3198 }
3202 bnx2_enable_nvram_write(struct bnx2 *bp)
3206 val = REG_RD(bp, BNX2_MISC_CFG);
3207 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3209 if (bp->flash_info->flags & BNX2_NV_WREN) {
3212 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3213 REG_WR(bp, BNX2_NVM_COMMAND,
3214 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3216 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3219 val = REG_RD(bp, BNX2_NVM_COMMAND);
3220 if (val & BNX2_NVM_COMMAND_DONE)
3224 if (j >= NVRAM_TIMEOUT_COUNT)
3225 return -EBUSY;
3227 return 0;
3228 }
3231 bnx2_disable_nvram_write(struct bnx2 *bp)
3235 val = REG_RD(bp, BNX2_MISC_CFG);
3236 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3241 bnx2_enable_nvram_access(struct bnx2 *bp)
3245 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3246 /* Enable both bits, even on read. */
3247 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3248 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3252 bnx2_disable_nvram_access(struct bnx2 *bp)
3256 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3257 /* Disable both bits, even after read. */
3258 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3259 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3260 BNX2_NVM_ACCESS_ENABLE_WR_EN));
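/* Illustrative sketch, not part of the driver: the canonical NVRAM
 * access bracketing that bnx2_nvram_read()/bnx2_nvram_write() below
 * are built around.  The hypothetical helper only shows the sequence. */
static int bnx2_example_nvram_session(struct bnx2 *bp)
{
	int rc;

	/* Arbitrate with the firmware for the flash interface. */
	rc = bnx2_acquire_nvram_lock(bp);
	if (rc != 0)
		return rc;
	bnx2_enable_nvram_access(bp);
	/* ... dword reads/writes via BNX2_NVM_COMMAND go here ... */
	bnx2_disable_nvram_access(bp);
	bnx2_release_nvram_lock(bp);
	return 0;
}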
3264 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3269 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3270 /* Buffered flash, no erase needed */
3271 return 0;
3273 /* Build an erase command */
3274 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3275 BNX2_NVM_COMMAND_DOIT;
3277 /* Need to clear DONE bit separately. */
3278 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3280 /* Address of the NVRAM page to erase. */
3281 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3283 /* Issue an erase command. */
3284 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3286 /* Wait for completion. */
3287 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3292 val = REG_RD(bp, BNX2_NVM_COMMAND);
3293 if (val & BNX2_NVM_COMMAND_DONE)
3297 if (j >= NVRAM_TIMEOUT_COUNT)
3298 return -EBUSY;
3300 return 0;
3301 }
3304 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3309 /* Build the command word. */
3310 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3312 /* Calculate the offset within a buffered flash; not needed on the 5709. */
3313 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3314 offset = ((offset / bp->flash_info->page_size) <<
3315 bp->flash_info->page_bits) +
3316 (offset % bp->flash_info->page_size);
3317 }
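/* Worked example of the translation above (values are illustrative):
 * with page_size == 256 and page_bits == 9, byte offset 0x132 maps to
 * (0x132 / 256) << 9 | (0x132 % 256) = 0x200 + 0x32 = 0x232: the page
 * index moves into the high bits, the byte-within-page stays low. */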
3319 /* Need to clear DONE bit separately. */
3320 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3322 /* Address of the NVRAM to read from. */
3323 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3325 /* Issue a read command. */
3326 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3328 /* Wait for completion. */
3329 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3334 val = REG_RD(bp, BNX2_NVM_COMMAND);
3335 if (val & BNX2_NVM_COMMAND_DONE) {
3336 val = REG_RD(bp, BNX2_NVM_READ);
3338 val = be32_to_cpu(val);
3339 memcpy(ret_val, &val, 4);
3340 break;
3341 }
3342 }
3343 if (j >= NVRAM_TIMEOUT_COUNT)
3344 return -EBUSY;
3346 return 0;
3347 }
3351 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3356 /* Build the command word. */
3357 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3359 /* Calculate the offset within a buffered flash; not needed on the 5709. */
3360 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3361 offset = ((offset / bp->flash_info->page_size) <<
3362 bp->flash_info->page_bits) +
3363 (offset % bp->flash_info->page_size);
3364 }
3366 /* Need to clear DONE bit separately. */
3367 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3369 memcpy(&val32, val, 4);
3370 val32 = cpu_to_be32(val32);
3372 /* Write the data. */
3373 REG_WR(bp, BNX2_NVM_WRITE, val32);
3375 /* Address of the NVRAM to write to. */
3376 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3378 /* Issue the write command. */
3379 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3381 /* Wait for completion. */
3382 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3385 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3386 break;
3388 if (j >= NVRAM_TIMEOUT_COUNT)
3389 return -EBUSY;
3391 return 0;
3392 }
3395 bnx2_init_nvram(struct bnx2 *bp)
3396 {
3397 u32 val;
3398 int j, entry_count, rc = 0;
3399 struct flash_spec *flash;
3401 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3402 bp->flash_info = &flash_5709;
3403 goto get_flash_size;
3406 /* Determine the selected interface. */
3407 val = REG_RD(bp, BNX2_NVM_CFG1);
3409 entry_count = ARRAY_SIZE(flash_table);
3411 if (val & 0x40000000) {
3413 /* Flash interface has been reconfigured */
3414 for (j = 0, flash = &flash_table[0]; j < entry_count;
3415 j++, flash++) {
3416 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3417 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3418 bp->flash_info = flash;
3419 break;
3420 }
3421 }
3422 }
3423 else {
3424 u32 mask;
3425 /* Not yet been reconfigured */
3427 if (val & (1 << 23))
3428 mask = FLASH_BACKUP_STRAP_MASK;
3429 else
3430 mask = FLASH_STRAP_MASK;
3432 for (j = 0, flash = &flash_table[0]; j < entry_count;
3433 j++, flash++) {
3435 if ((val & mask) == (flash->strapping & mask)) {
3436 bp->flash_info = flash;
3438 /* Request access to the flash interface. */
3439 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3440 return rc;
3442 /* Enable access to flash interface */
3443 bnx2_enable_nvram_access(bp);
3445 /* Reconfigure the flash interface */
3446 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3447 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3448 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3449 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3451 /* Disable access to flash interface */
3452 bnx2_disable_nvram_access(bp);
3453 bnx2_release_nvram_lock(bp);
3454 break;
3455 }
3456 }
3458 } /* if (val & 0x40000000) */
3460 if (j == entry_count) {
3461 bp->flash_info = NULL;
3462 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3463 rc = -ENODEV;
3464 }
3466 get_flash_size:
3467 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3468 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3470 bp->flash_size = val;
3472 bp->flash_size = bp->flash_info->total_size;
3478 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3479 int buf_size)
3480 {
3481 int rc = 0;
3482 u32 cmd_flags, offset32, len32, extra;
3484 if (buf_size == 0)
3485 return 0;
3487 /* Request access to the flash interface. */
3488 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3489 return rc;
3491 /* Enable access to flash interface */
3492 bnx2_enable_nvram_access(bp);
3494 len32 = buf_size;
3495 offset32 = offset;
3496 extra = 0;
3497 cmd_flags = 0;
3499 if (offset32 & 3) {
3500 u8 buf[4];
3501 u32 pre_len;
3503 offset32 &= ~3;
3505 pre_len = 4 - (offset & 3);
3507 if (pre_len >= len32) {
3508 pre_len = len32;
3509 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3510 BNX2_NVM_COMMAND_LAST;
3511 }
3512 else {
3513 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3514 }
3516 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3517 if (rc)
3518 return rc;
3521 memcpy(ret_buf, buf + (offset & 3), pre_len);
3522 offset32 += 4;
3523 ret_buf += pre_len;
3524 len32 -= pre_len;
3525 }
3527 if (len32 & 3) {
3528 extra = 4 - (len32 & 3);
3529 len32 = (len32 + 4) & ~3;
3530 }
3532 if (len32 == 4) {
3533 u8 buf[4];
3535 if (cmd_flags)
3536 cmd_flags = BNX2_NVM_COMMAND_LAST;
3537 else
3538 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3539 BNX2_NVM_COMMAND_LAST;
3541 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3543 memcpy(ret_buf, buf, 4 - extra);
3544 }
3545 else if (len32 > 0) {
3546 u8 buf[4];
3548 /* Read the first word. */
3549 if (cmd_flags)
3550 cmd_flags = 0;
3551 else
3552 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3554 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3556 /* Advance to the next dword. */
3557 offset32 += 4;
3558 ret_buf += 4;
3559 len32 -= 4;
3561 while (len32 > 4 && rc == 0) {
3562 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3564 /* Advance to the next dword. */
3565 offset32 += 4;
3566 ret_buf += 4;
3567 len32 -= 4;
3568 }
3570 if (rc)
3571 return rc;
3573 cmd_flags = BNX2_NVM_COMMAND_LAST;
3574 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3576 memcpy(ret_buf, buf, 4 - extra);
3577 }
3579 /* Disable access to flash interface */
3580 bnx2_disable_nvram_access(bp);
3582 bnx2_release_nvram_lock(bp);
3584 return rc;
3585 }
3588 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3589 int buf_size)
3590 {
3591 u32 written, offset32, len32;
3592 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3593 int rc = 0;
3594 int align_start, align_end;
3596 buf = data_buf;
3597 offset32 = offset;
3598 len32 = buf_size;
3599 align_start = align_end = 0;
3601 if ((align_start = (offset32 & 3))) {
3602 offset32 &= ~3;
3603 len32 += align_start;
3604 if (len32 < 4)
3605 len32 = 4;
3606 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3607 return rc;
3608 }
3610 if (len32 & 3) {
3611 align_end = 4 - (len32 & 3);
3612 len32 += align_end;
3613 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3614 return rc;
3615 }
3617 if (align_start || align_end) {
3618 align_buf = kmalloc(len32, GFP_KERNEL);
3619 if (align_buf == NULL)
3620 return -ENOMEM;
3621 if (align_start) {
3622 memcpy(align_buf, start, 4);
3623 }
3624 if (align_end) {
3625 memcpy(align_buf + len32 - 4, end, 4);
3626 }
3627 memcpy(align_buf + align_start, data_buf, buf_size);
3628 buf = align_buf;
3629 }
3631 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3632 flash_buffer = kmalloc(264, GFP_KERNEL);
3633 if (flash_buffer == NULL) {
3634 rc = -ENOMEM;
3635 goto nvram_write_end;
3636 }
3637 }
3638 written = 0;
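/* The write loop below handles one flash page per iteration:
 * non-buffered parts are read-modify-write (read the whole page into
 * flash_buffer, erase it, then write back the merged old and new
 * data); buffered parts take straight writes with no erase. */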
3640 while ((written < len32) && (rc == 0)) {
3641 u32 page_start, page_end, data_start, data_end;
3642 u32 addr, cmd_flags;
3643 int i;
3645 /* Find the page_start addr */
3646 page_start = offset32 + written;
3647 page_start -= (page_start % bp->flash_info->page_size);
3648 /* Find the page_end addr */
3649 page_end = page_start + bp->flash_info->page_size;
3650 /* Find the data_start addr */
3651 data_start = (written == 0) ? offset32 : page_start;
3652 /* Find the data_end addr */
3653 data_end = (page_end > offset32 + len32) ?
3654 (offset32 + len32) : page_end;
3656 /* Request access to the flash interface. */
3657 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3658 goto nvram_write_end;
3660 /* Enable access to flash interface */
3661 bnx2_enable_nvram_access(bp);
3663 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3664 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3667 /* Read the whole page into the buffer
3668 * (non-buffer flash only) */
3669 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3670 if (j == (bp->flash_info->page_size - 4)) {
3671 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3672 }
3673 rc = bnx2_nvram_read_dword(bp,
3674 page_start + j,
3675 &flash_buffer[j],
3676 cmd_flags);
3678 if (rc)
3679 goto nvram_write_end;
3681 cmd_flags = 0;
3682 }
3683 }
3685 /* Enable writes to flash interface (unlock write-protect) */
3686 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3687 goto nvram_write_end;
3689 /* Loop to write back the buffer data from page_start to
3690 * data_start */
3691 i = 0;
3692 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3693 /* Erase the page */
3694 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3695 goto nvram_write_end;
3697 /* Re-enable the write again for the actual write */
3698 bnx2_enable_nvram_write(bp);
3700 for (addr = page_start; addr < data_start;
3701 addr += 4, i += 4) {
3703 rc = bnx2_nvram_write_dword(bp, addr,
3704 &flash_buffer[i], cmd_flags);
3706 if (rc != 0)
3707 goto nvram_write_end;
3709 cmd_flags = 0;
3710 }
3711 }
3713 /* Loop to write the new data from data_start to data_end */
3714 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3715 if ((addr == page_end - 4) ||
3716 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3717 (addr == data_end - 4))) {
3719 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3720 }
3721 rc = bnx2_nvram_write_dword(bp, addr, buf,
3722 cmd_flags);
3724 if (rc != 0)
3725 goto nvram_write_end;
3727 cmd_flags = 0;
3728 buf += 4;
3729 }
3731 /* Loop to write back the buffer data from data_end
3732 * to page_end */
3733 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3734 for (addr = data_end; addr < page_end;
3735 addr += 4, i += 4) {
3737 if (addr == page_end-4) {
3738 cmd_flags = BNX2_NVM_COMMAND_LAST;
3739 }
3740 rc = bnx2_nvram_write_dword(bp, addr,
3741 &flash_buffer[i], cmd_flags);
3743 if (rc != 0)
3744 goto nvram_write_end;
3746 cmd_flags = 0;
3747 }
3748 }
3750 /* Disable writes to flash interface (lock write-protect) */
3751 bnx2_disable_nvram_write(bp);
3753 /* Disable access to flash interface */
3754 bnx2_disable_nvram_access(bp);
3755 bnx2_release_nvram_lock(bp);
3757 /* Increment written */
3758 written += data_end - data_start;
3759 }
3761 nvram_write_end:
3762 kfree(flash_buffer);
3763 kfree(align_buf);
3764 return rc;
3765 }
3768 bnx2_init_remote_phy(struct bnx2 *bp)
3772 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3773 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3774 return;
3776 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3777 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3778 return;
3780 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3781 if (netif_running(bp->dev)) {
3782 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3783 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3784 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3785 val);
3786 }
3787 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3789 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3790 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3791 bp->phy_port = PORT_FIBRE;
3792 else
3793 bp->phy_port = PORT_TP;
3794 }
3795 }
3798 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3799 {
3800 u32 val;
3801 int i, rc = 0;
3803 /* Wait for the current PCI transaction to complete before
3804 * issuing a reset. */
3805 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3806 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3807 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3808 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3809 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3810 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3811 udelay(5);
3813 /* Wait for the firmware to tell us it is ok to issue a reset. */
3814 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3816 /* Deposit a driver reset signature so the firmware knows that
3817 * this is a soft reset. */
3818 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3819 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3821 /* Do a dummy read to force the chip to complete all current transactions
3822 * before we issue a reset. */
3823 val = REG_RD(bp, BNX2_MISC_ID);
3825 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3826 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3827 REG_RD(bp, BNX2_MISC_COMMAND);
3828 udelay(5);
3830 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3831 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3833 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3834 } else {
3836 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3837 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3838 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3841 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3843 /* Reading back any register after chip reset will hang the
3844 * bus on 5706 A0 and A1. The msleep below provides plenty
3845 * of margin for write posting.
3847 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3848 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3849 msleep(20);
3851 /* Reset takes approximately 30 usec */
3852 for (i = 0; i < 10; i++) {
3853 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3854 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3855 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3856 break;
3858 msleep(10);
3859 }
3860 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3861 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3862 printk(KERN_ERR PFX "Chip reset did not complete\n");
3863 return -EBUSY;
3864 }
3867 /* Make sure byte swapping is properly configured. */
3868 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3869 if (val != 0x01020304) {
3870 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3871 return -ENODEV;
3872 }
3874 /* Wait for the firmware to finish its initialization. */
3875 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3876 if (rc)
3877 return rc;
3879 spin_lock_bh(&bp->phy_lock);
3880 bnx2_init_remote_phy(bp);
3881 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3882 bnx2_set_default_remote_link(bp);
3883 spin_unlock_bh(&bp->phy_lock);
3885 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3886 /* Adjust the voltage regulator two steps lower. The default
3887 * of this register is 0x0000000e. */
3888 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3890 /* Remove bad rbuf memory from the free pool. */
3891 rc = bnx2_alloc_bad_rbuf(bp);
3892 }
3894 return rc;
3895 }
3898 bnx2_init_chip(struct bnx2 *bp)
3899 {
3900 u32 val;
3901 int rc;
3903 /* Make sure the interrupt is not active. */
3904 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3906 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3907 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3908 #ifdef __BIG_ENDIAN
3909 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3910 #endif
3911 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3912 DMA_READ_CHANS << 12 |
3913 DMA_WRITE_CHANS << 16;
3915 val |= (0x2 << 20) | (1 << 11);
3917 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3918 val |= (1 << 23);
3920 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3921 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3922 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3924 REG_WR(bp, BNX2_DMA_CONFIG, val);
3926 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3927 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3928 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3929 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3932 if (bp->flags & PCIX_FLAG) {
3933 u16 val16;
3935 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3937 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3938 val16 & ~PCI_X_CMD_ERO);
3939 }
3941 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3942 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3943 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3944 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3946 /* Initialize context mapping and zero out the quick contexts. The
3947 * context block must have already been enabled. */
3948 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3949 rc = bnx2_init_5709_context(bp);
3950 if (rc)
3951 return rc;
3952 } else
3953 bnx2_init_context(bp);
3955 if ((rc = bnx2_init_cpus(bp)) != 0)
3956 return rc;
3958 bnx2_init_nvram(bp);
3960 bnx2_set_mac_addr(bp);
3962 val = REG_RD(bp, BNX2_MQ_CONFIG);
3963 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3964 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3965 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3966 val |= BNX2_MQ_CONFIG_HALT_DIS;
3968 REG_WR(bp, BNX2_MQ_CONFIG, val);
3970 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3971 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3972 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3974 val = (BCM_PAGE_BITS - 8) << 24;
3975 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3977 /* Configure page size. */
3978 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3979 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3980 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3981 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3983 val = bp->mac_addr[0] +
3984 (bp->mac_addr[1] << 8) +
3985 (bp->mac_addr[2] << 16) +
3986 bp->mac_addr[3] +
3987 (bp->mac_addr[4] << 8) +
3988 (bp->mac_addr[5] << 16);
3989 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3991 /* Program the MTU. Also include 4 bytes for CRC32. */
3992 val = bp->dev->mtu + ETH_HLEN + 4;
3993 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3994 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3995 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3997 bp->last_status_idx = 0;
3998 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4000 /* Set up how to generate a link change interrupt. */
4001 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4003 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4004 (u64) bp->status_blk_mapping & 0xffffffff);
4005 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4007 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4008 (u64) bp->stats_blk_mapping & 0xffffffff);
4009 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4010 (u64) bp->stats_blk_mapping >> 32);
4012 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4013 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4015 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4016 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4018 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4019 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4021 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4023 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4025 REG_WR(bp, BNX2_HC_COM_TICKS,
4026 (bp->com_ticks_int << 16) | bp->com_ticks);
4028 REG_WR(bp, BNX2_HC_CMD_TICKS,
4029 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4031 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4032 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4033 else
4034 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4035 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4037 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4038 val = BNX2_HC_CONFIG_COLLECT_STATS;
4039 else
4040 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4041 BNX2_HC_CONFIG_COLLECT_STATS;
4044 if (bp->flags & ONE_SHOT_MSI_FLAG)
4045 val |= BNX2_HC_CONFIG_ONE_SHOT;
4047 REG_WR(bp, BNX2_HC_CONFIG, val);
4049 /* Clear internal stats counters. */
4050 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4052 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4054 /* Initialize the receive filter. */
4055 bnx2_set_rx_mode(bp->dev);
4057 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4058 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4059 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4060 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4062 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4063 0);
4065 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4066 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4067 udelay(20);
4070 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4072 return rc;
4073 }
4076 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4078 u32 val, offset0, offset1, offset2, offset3;
4080 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4081 offset0 = BNX2_L2CTX_TYPE_XI;
4082 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4083 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4084 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4085 } else {
4086 offset0 = BNX2_L2CTX_TYPE;
4087 offset1 = BNX2_L2CTX_CMD_TYPE;
4088 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4089 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4091 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4092 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4094 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4095 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4097 val = (u64) bp->tx_desc_mapping >> 32;
4098 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4100 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4101 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4105 bnx2_init_tx_ring(struct bnx2 *bp)
4110 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4112 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4114 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4115 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4117 bp->tx_prod = 0;
4118 bp->tx_cons = 0;
4119 bp->hw_tx_cons = 0;
4120 bp->tx_prod_bseq = 0;
4122 cid = TX_CID;
4123 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4124 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4126 bnx2_init_tx_context(bp, cid);
4130 bnx2_init_rx_ring(struct bnx2 *bp)
4134 u16 prod, ring_prod;
4137 /* 8 for CRC and VLAN */
4138 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4140 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4142 ring_prod = prod = bp->rx_prod = 0;
4143 bp->rx_cons = 0;
4144 bp->hw_rx_cons = 0;
4145 bp->rx_prod_bseq = 0;
4147 for (i = 0; i < bp->rx_max_ring; i++) {
4148 int j;
4150 rxbd = &bp->rx_desc_ring[i][0];
4151 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4152 rxbd->rx_bd_len = bp->rx_buf_use_size;
4153 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4154 }
4155 if (i == (bp->rx_max_ring - 1))
4156 j = 0;
4157 else
4158 j = i + 1;
4159 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4160 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4161 0xffffffff;
4162 }
4164 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4165 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4167 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4169 val = (u64) bp->rx_desc_mapping[0] >> 32;
4170 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4172 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4173 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4175 for (i = 0; i < bp->rx_ring_size; i++) {
4176 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4177 break;
4178 }
4179 prod = NEXT_RX_BD(prod);
4180 ring_prod = RX_RING_IDX(prod);
4181 }
4182 bp->rx_prod = prod;
4184 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4186 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4190 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4191 {
4192 u32 max, num_rings;
4194 bp->rx_ring_size = size;
4195 num_rings = 1;
4196 while (size > MAX_RX_DESC_CNT) {
4197 size -= MAX_RX_DESC_CNT;
4198 num_rings++;
4199 }
4200 /* round to next power of 2 */
4201 max = MAX_RX_RINGS;
4202 while ((max & num_rings) == 0)
4203 max >>= 1;
4205 if (num_rings != max)
4206 max <<= 1;
4208 bp->rx_max_ring = max;
4209 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
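/* Example (assuming MAX_RX_DESC_CNT == 255, i.e. 256 BDs per page with
 * the last BD used as a chain pointer): a requested size of 300 spans
 * two ring pages; 2 is already a power of two, so rx_max_ring == 2 and
 * rx_max_ring_idx == 2 * RX_DESC_CNT - 1. */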
4213 bnx2_free_tx_skbs(struct bnx2 *bp)
4217 if (bp->tx_buf_ring == NULL)
4220 for (i = 0; i < TX_DESC_CNT; ) {
4221 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4222 struct sk_buff *skb = tx_buf->skb;
4230 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4231 skb_headlen(skb), PCI_DMA_TODEVICE);
4233 tx_buf->skb = NULL;
4235 last = skb_shinfo(skb)->nr_frags;
4236 for (j = 0; j < last; j++) {
4237 tx_buf = &bp->tx_buf_ring[i + j + 1];
4238 pci_unmap_page(bp->pdev,
4239 pci_unmap_addr(tx_buf, mapping),
4240 skb_shinfo(skb)->frags[j].size,
4241 PCI_DMA_TODEVICE);
4242 }
4244 dev_kfree_skb(skb);
4245 i += j + 1;
4246 }
4247 }
4250 bnx2_free_rx_skbs(struct bnx2 *bp)
4254 if (bp->rx_buf_ring == NULL)
4257 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4258 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4259 struct sk_buff *skb = rx_buf->skb;
4261 if (skb == NULL)
4262 continue;
4264 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4265 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4267 rx_buf->skb = NULL;
4268 dev_kfree_skb(skb);
4269 }
4270 }
4274 bnx2_free_skbs(struct bnx2 *bp)
4276 bnx2_free_tx_skbs(bp);
4277 bnx2_free_rx_skbs(bp);
4281 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4285 rc = bnx2_reset_chip(bp, reset_code);
4286 if (rc)
4287 return rc;
4290 if ((rc = bnx2_init_chip(bp)) != 0)
4291 return rc;
4293 bnx2_init_tx_ring(bp);
4294 bnx2_init_rx_ring(bp);
4296 return 0;
4297 }
4299 bnx2_init_nic(struct bnx2 *bp)
4303 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4304 return rc;
4306 spin_lock_bh(&bp->phy_lock);
4307 bnx2_init_phy(bp);
4308 bnx2_set_link(bp);
4309 spin_unlock_bh(&bp->phy_lock);
4311 return 0;
4312 }
4314 bnx2_test_registers(struct bnx2 *bp)
4315 {
4316 int ret;
4317 int i, is_5709;
4318 static const struct {
4319 u16 offset;
4320 u16 flags;
4321 #define BNX2_FL_NOT_5709 1
4322 u32 rw_mask;
4323 u32 ro_mask;
4324 } reg_tbl[] = {
4325 { 0x006c, 0, 0x00000000, 0x0000003f },
4326 { 0x0090, 0, 0xffffffff, 0x00000000 },
4327 { 0x0094, 0, 0x00000000, 0x00000000 },
4329 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4330 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4331 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4332 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4333 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4334 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4335 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4336 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4337 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4339 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4340 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4341 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4342 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4343 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4344 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4346 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4347 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4348 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4350 { 0x1000, 0, 0x00000000, 0x00000001 },
4351 { 0x1004, 0, 0x00000000, 0x000f0001 },
4353 { 0x1408, 0, 0x01c00800, 0x00000000 },
4354 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4355 { 0x14a8, 0, 0x00000000, 0x000001ff },
4356 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4357 { 0x14b0, 0, 0x00000002, 0x00000001 },
4358 { 0x14b8, 0, 0x00000000, 0x00000000 },
4359 { 0x14c0, 0, 0x00000000, 0x00000009 },
4360 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4361 { 0x14cc, 0, 0x00000000, 0x00000001 },
4362 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4364 { 0x1800, 0, 0x00000000, 0x00000001 },
4365 { 0x1804, 0, 0x00000000, 0x00000003 },
4367 { 0x2800, 0, 0x00000000, 0x00000001 },
4368 { 0x2804, 0, 0x00000000, 0x00003f01 },
4369 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4370 { 0x2810, 0, 0xffff0000, 0x00000000 },
4371 { 0x2814, 0, 0xffff0000, 0x00000000 },
4372 { 0x2818, 0, 0xffff0000, 0x00000000 },
4373 { 0x281c, 0, 0xffff0000, 0x00000000 },
4374 { 0x2834, 0, 0xffffffff, 0x00000000 },
4375 { 0x2840, 0, 0x00000000, 0xffffffff },
4376 { 0x2844, 0, 0x00000000, 0xffffffff },
4377 { 0x2848, 0, 0xffffffff, 0x00000000 },
4378 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4380 { 0x2c00, 0, 0x00000000, 0x00000011 },
4381 { 0x2c04, 0, 0x00000000, 0x00030007 },
4383 { 0x3c00, 0, 0x00000000, 0x00000001 },
4384 { 0x3c04, 0, 0x00000000, 0x00070000 },
4385 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4386 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4387 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4388 { 0x3c14, 0, 0x00000000, 0xffffffff },
4389 { 0x3c18, 0, 0x00000000, 0xffffffff },
4390 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4391 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4393 { 0x5004, 0, 0x00000000, 0x0000007f },
4394 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4396 { 0x5c00, 0, 0x00000000, 0x00000001 },
4397 { 0x5c04, 0, 0x00000000, 0x0003000f },
4398 { 0x5c08, 0, 0x00000003, 0x00000000 },
4399 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4400 { 0x5c10, 0, 0x00000000, 0xffffffff },
4401 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4402 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4403 { 0x5c88, 0, 0x00000000, 0x00077373 },
4404 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4406 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4407 { 0x680c, 0, 0xffffffff, 0x00000000 },
4408 { 0x6810, 0, 0xffffffff, 0x00000000 },
4409 { 0x6814, 0, 0xffffffff, 0x00000000 },
4410 { 0x6818, 0, 0xffffffff, 0x00000000 },
4411 { 0x681c, 0, 0xffffffff, 0x00000000 },
4412 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4413 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4414 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4415 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4416 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4417 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4418 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4419 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4420 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4421 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4422 { 0x684c, 0, 0xffffffff, 0x00000000 },
4423 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4424 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4425 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4426 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4427 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4428 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4430 { 0xffff, 0, 0x00000000, 0x00000000 },
4431 };
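/* Test method: each register is written twice, all-zeros then
 * all-ones.  Bits in rw_mask must read back exactly as written; bits
 * in ro_mask must keep their saved value regardless of the write.  The
 * saved value is restored afterwards so chip state is unchanged. */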
4432 ret = 0;
4433 is_5709 = 0;
4435 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4436 is_5709 = 1;
4438 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4439 u32 offset, rw_mask, ro_mask, save_val, val;
4440 u16 flags = reg_tbl[i].flags;
4442 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4445 offset = (u32) reg_tbl[i].offset;
4446 rw_mask = reg_tbl[i].rw_mask;
4447 ro_mask = reg_tbl[i].ro_mask;
4449 save_val = readl(bp->regview + offset);
4451 writel(0, bp->regview + offset);
4453 val = readl(bp->regview + offset);
4454 if ((val & rw_mask) != 0) {
4455 goto reg_test_err;
4456 }
4458 if ((val & ro_mask) != (save_val & ro_mask)) {
4459 goto reg_test_err;
4460 }
4462 writel(0xffffffff, bp->regview + offset);
4464 val = readl(bp->regview + offset);
4465 if ((val & rw_mask) != rw_mask) {
4466 goto reg_test_err;
4467 }
4469 if ((val & ro_mask) != (save_val & ro_mask)) {
4470 goto reg_test_err;
4471 }
4473 writel(save_val, bp->regview + offset);
4474 continue;
4476 reg_test_err:
4477 writel(save_val, bp->regview + offset);
4478 ret = -ENODEV;
4479 break;
4480 }
4482 return ret;
4483 }
4485 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4487 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4488 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4489 int i;
4491 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4492 u32 offset;
4494 for (offset = 0; offset < size; offset += 4) {
4496 REG_WR_IND(bp, start + offset, test_pattern[i]);
4498 if (REG_RD_IND(bp, start + offset) !=
4499 test_pattern[i]) {
4500 return -ENODEV;
4501 }
4502 }
4503 }
4505 return 0;
4506 }
4508 bnx2_test_memory(struct bnx2 *bp)
4512 static struct mem_entry {
4513 u32 offset;
4514 u32 len;
4515 } mem_tbl_5706[] = {
4516 { 0x60000, 0x4000 },
4517 { 0xa0000, 0x3000 },
4518 { 0xe0000, 0x4000 },
4519 { 0x120000, 0x4000 },
4520 { 0x1a0000, 0x4000 },
4521 { 0x160000, 0x4000 },
4522 { 0xffffffff, 0 },
4523 },
4524 mem_tbl_5709[] = {
4525 { 0x60000, 0x4000 },
4526 { 0xa0000, 0x3000 },
4527 { 0xe0000, 0x4000 },
4528 { 0x120000, 0x4000 },
4529 { 0x1a0000, 0x4000 },
4530 { 0xffffffff, 0 },
4531 };
4532 struct mem_entry *mem_tbl;
4534 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4535 mem_tbl = mem_tbl_5709;
4536 else
4537 mem_tbl = mem_tbl_5706;
4539 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4540 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4541 mem_tbl[i].len)) != 0) {
4542 return ret;
4543 }
4544 }
4546 return ret;
4547 }
4549 #define BNX2_MAC_LOOPBACK 0
4550 #define BNX2_PHY_LOOPBACK 1
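/* Descriptive note: bnx2_run_loopback() builds a test frame addressed
 * to the NIC itself, queues it on the tx ring, forces an immediate
 * coalesce, then verifies that exactly one packet completes on the rx
 * ring with a clean l2_fhdr and an intact payload. */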
4553 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4555 unsigned int pkt_size, num_pkts, i;
4556 struct sk_buff *skb, *rx_skb;
4557 unsigned char *packet;
4558 u16 rx_start_idx, rx_idx;
4559 dma_addr_t map;
4560 struct tx_bd *txbd;
4561 struct sw_bd *rx_buf;
4562 struct l2_fhdr *rx_hdr;
4563 int ret = -ENODEV;
4565 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4566 bp->loopback = MAC_LOOPBACK;
4567 bnx2_set_mac_loopback(bp);
4568 }
4569 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4570 bp->loopback = PHY_LOOPBACK;
4571 bnx2_set_phy_loopback(bp);
4572 }
4573 else
4574 return -EINVAL;
4576 pkt_size = 1514;
4577 skb = netdev_alloc_skb(bp->dev, pkt_size);
4578 if (!skb)
4579 return -ENOMEM;
4580 packet = skb_put(skb, pkt_size);
4581 memcpy(packet, bp->dev->dev_addr, 6);
4582 memset(packet + 6, 0x0, 8);
4583 for (i = 14; i < pkt_size; i++)
4584 packet[i] = (unsigned char) (i & 0xff);
4586 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4587 PCI_DMA_TODEVICE);
4589 REG_WR(bp, BNX2_HC_COMMAND,
4590 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4592 REG_RD(bp, BNX2_HC_COMMAND);
4593 udelay(5);
4595 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4597 num_pkts = 0;
4599 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4601 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4602 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4603 txbd->tx_bd_mss_nbytes = pkt_size;
4604 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4606 num_pkts++;
4607 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4608 bp->tx_prod_bseq += pkt_size;
4610 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4611 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4613 udelay(100);
4615 REG_WR(bp, BNX2_HC_COMMAND,
4616 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4618 REG_RD(bp, BNX2_HC_COMMAND);
4619 udelay(5);
4622 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4623 dev_kfree_skb(skb);
4625 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4626 goto loopback_test_done;
4629 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4630 if (rx_idx != rx_start_idx + num_pkts) {
4631 goto loopback_test_done;
4634 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4635 rx_skb = rx_buf->skb;
4637 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4638 skb_reserve(rx_skb, bp->rx_offset);
4640 pci_dma_sync_single_for_cpu(bp->pdev,
4641 pci_unmap_addr(rx_buf, mapping),
4642 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4644 if (rx_hdr->l2_fhdr_status &
4645 (L2_FHDR_ERRORS_BAD_CRC |
4646 L2_FHDR_ERRORS_PHY_DECODE |
4647 L2_FHDR_ERRORS_ALIGNMENT |
4648 L2_FHDR_ERRORS_TOO_SHORT |
4649 L2_FHDR_ERRORS_GIANT_FRAME)) {
4651 goto loopback_test_done;
4654 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4655 goto loopback_test_done;
4658 for (i = 14; i < pkt_size; i++) {
4659 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4660 goto loopback_test_done;
4661 }
4662 }
4664 ret = 0;
4666 loopback_test_done:
4667 bp->loopback = 0;
4668 return ret;
4669 }
4671 #define BNX2_MAC_LOOPBACK_FAILED 1
4672 #define BNX2_PHY_LOOPBACK_FAILED 2
4673 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4674 BNX2_PHY_LOOPBACK_FAILED)
4677 bnx2_test_loopback(struct bnx2 *bp)
4681 if (!netif_running(bp->dev))
4682 return BNX2_LOOPBACK_FAILED;
4684 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4685 spin_lock_bh(&bp->phy_lock);
4686 bnx2_init_phy(bp);
4687 spin_unlock_bh(&bp->phy_lock);
4688 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4689 rc |= BNX2_MAC_LOOPBACK_FAILED;
4690 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4691 rc |= BNX2_PHY_LOOPBACK_FAILED;
4693 return rc;
4694 }
4695 #define NVRAM_SIZE 0x200
4696 #define CRC32_RESIDUAL 0xdebb20e3
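/* 0xdebb20e3 is the standard CRC-32 residual: running the little-
 * endian CRC over a block that ends with its own CRC-32 always yields
 * this constant, so each 0x100-byte region can be validated without
 * locating the checksum field itself. */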
4699 bnx2_test_nvram(struct bnx2 *bp)
4701 u32 buf[NVRAM_SIZE / 4];
4702 u8 *data = (u8 *) buf;
4706 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4707 goto test_nvram_done;
4709 magic = be32_to_cpu(buf[0]);
4710 if (magic != 0x669955aa) {
4711 rc = -ENODEV;
4712 goto test_nvram_done;
4713 }
4715 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4716 goto test_nvram_done;
4718 csum = ether_crc_le(0x100, data);
4719 if (csum != CRC32_RESIDUAL) {
4720 rc = -ENODEV;
4721 goto test_nvram_done;
4722 }
4724 csum = ether_crc_le(0x100, data + 0x100);
4725 if (csum != CRC32_RESIDUAL) {
4726 rc = -ENODEV;
4727 }
4729 test_nvram_done:
4730 return rc;
4731 }
4734 bnx2_test_link(struct bnx2 *bp)
4738 spin_lock_bh(&bp->phy_lock);
4739 bnx2_enable_bmsr1(bp);
4740 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4741 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4742 bnx2_disable_bmsr1(bp);
4743 spin_unlock_bh(&bp->phy_lock);
4745 if (bmsr & BMSR_LSTATUS) {
4746 return 0;
4747 }
4748 return -ENODEV;
4749 }
4752 bnx2_test_intr(struct bnx2 *bp)
4757 if (!netif_running(bp->dev))
4758 return -ENODEV;
4760 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4762 /* This register is not touched during run-time. */
4763 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4764 REG_RD(bp, BNX2_HC_COMMAND);
4766 for (i = 0; i < 10; i++) {
4767 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4768 status_idx)
4769 break;
4773 msleep_interruptible(10);
4774 }
4776 if (i < 10)
4777 return 0;
4779 return -ENODEV;
4780 }
4782 bnx2_5706_serdes_timer(struct bnx2 *bp)
4784 spin_lock(&bp->phy_lock);
4785 if (bp->serdes_an_pending)
4786 bp->serdes_an_pending--;
4787 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4788 u32 bmcr;
4790 bp->current_interval = bp->timer_interval;
4792 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4794 if (bmcr & BMCR_ANENABLE) {
4795 u32 phy1, phy2;
4797 bnx2_write_phy(bp, 0x1c, 0x7c00);
4798 bnx2_read_phy(bp, 0x1c, &phy1);
4800 bnx2_write_phy(bp, 0x17, 0x0f01);
4801 bnx2_read_phy(bp, 0x15, &phy2);
4802 bnx2_write_phy(bp, 0x17, 0x0f01);
4803 bnx2_read_phy(bp, 0x15, &phy2);
4805 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4806 !(phy2 & 0x20)) { /* no CONFIG */
4808 bmcr &= ~BMCR_ANENABLE;
4809 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4810 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4811 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4815 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4816 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4817 u32 phy2;
4819 bnx2_write_phy(bp, 0x17, 0x0f01);
4820 bnx2_read_phy(bp, 0x15, &phy2);
4821 if (phy2 & 0x20) {
4822 u32 bmcr;
4824 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4825 bmcr |= BMCR_ANENABLE;
4826 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4828 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4829 }
4830 } else
4831 bp->current_interval = bp->timer_interval;
4833 spin_unlock(&bp->phy_lock);
4837 bnx2_5708_serdes_timer(struct bnx2 *bp)
4839 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4840 return;
4842 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4843 bp->serdes_an_pending = 0;
4844 return;
4845 }
4847 spin_lock(&bp->phy_lock);
4848 if (bp->serdes_an_pending)
4849 bp->serdes_an_pending--;
4850 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4851 u32 bmcr;
4853 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4854 if (bmcr & BMCR_ANENABLE) {
4855 bnx2_enable_forced_2g5(bp);
4856 bp->current_interval = SERDES_FORCED_TIMEOUT;
4857 } else {
4858 bnx2_disable_forced_2g5(bp);
4859 bp->serdes_an_pending = 2;
4860 bp->current_interval = bp->timer_interval;
4861 }
4862 } else
4864 bp->current_interval = bp->timer_interval;
4866 spin_unlock(&bp->phy_lock);
4870 bnx2_timer(unsigned long data)
4872 struct bnx2 *bp = (struct bnx2 *) data;
4874 if (!netif_running(bp->dev))
4875 return;
4877 if (atomic_read(&bp->intr_sem) != 0)
4878 goto bnx2_restart_timer;
4880 bnx2_send_heart_beat(bp);
4882 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4884 /* work around occasionally corrupted counters */
4885 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4886 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4887 BNX2_HC_COMMAND_STATS_NOW);
4889 if (bp->phy_flags & PHY_SERDES_FLAG) {
4890 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4891 bnx2_5706_serdes_timer(bp);
4893 bnx2_5708_serdes_timer(bp);
4894 }
4896 bnx2_restart_timer:
4897 mod_timer(&bp->timer, jiffies + bp->current_interval);
4901 bnx2_request_irq(struct bnx2 *bp)
4903 struct net_device *dev = bp->dev;
4904 int rc;
4906 if (bp->flags & USING_MSI_FLAG) {
4907 irq_handler_t fn = bnx2_msi;
4909 if (bp->flags & ONE_SHOT_MSI_FLAG)
4910 fn = bnx2_msi_1shot;
4912 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4913 } else
4914 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4915 IRQF_SHARED, dev->name, dev);
4917 return rc;
4918 }
4920 bnx2_free_irq(struct bnx2 *bp)
4922 struct net_device *dev = bp->dev;
4924 if (bp->flags & USING_MSI_FLAG) {
4925 free_irq(bp->pdev->irq, dev);
4926 pci_disable_msi(bp->pdev);
4927 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4928 } else
4929 free_irq(bp->pdev->irq, dev);
4932 /* Called with rtnl_lock */
4934 bnx2_open(struct net_device *dev)
4936 struct bnx2 *bp = netdev_priv(dev);
4939 netif_carrier_off(dev);
4941 bnx2_set_power_state(bp, PCI_D0);
4942 bnx2_disable_int(bp);
4944 rc = bnx2_alloc_mem(bp);
4945 if (rc)
4946 return rc;
4948 napi_enable(&bp->napi);
4950 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4951 if (pci_enable_msi(bp->pdev) == 0) {
4952 bp->flags |= USING_MSI_FLAG;
4953 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4954 bp->flags |= ONE_SHOT_MSI_FLAG;
4955 }
4956 }
4957 rc = bnx2_request_irq(bp);
4959 if (rc) {
4960 napi_disable(&bp->napi);
4961 bnx2_free_mem(bp);
4962 return rc;
4963 }
4965 rc = bnx2_init_nic(bp);
4967 if (rc) {
4968 napi_disable(&bp->napi);
4969 bnx2_free_irq(bp);
4970 bnx2_free_skbs(bp);
4971 bnx2_free_mem(bp);
4972 return rc;
4973 }
4975 mod_timer(&bp->timer, jiffies + bp->current_interval);
4977 atomic_set(&bp->intr_sem, 0);
4979 bnx2_enable_int(bp);
4981 if (bp->flags & USING_MSI_FLAG) {
4982 /* Test MSI to make sure it is working.
4983 * If the MSI test fails, go back to INTx mode.
4984 */
4985 if (bnx2_test_intr(bp) != 0) {
4986 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4987 " using MSI, switching to INTx mode. Please"
4988 " report this failure to the PCI maintainer"
4989 " and include system chipset information.\n",
4992 bnx2_disable_int(bp);
4995 rc = bnx2_init_nic(bp);
4996 if (!rc)
4998 rc = bnx2_request_irq(bp);
4999 if (rc) {
5001 napi_disable(&bp->napi);
5002 bnx2_free_skbs(bp);
5003 bnx2_free_mem(bp);
5004 del_timer_sync(&bp->timer);
5005 return rc;
5006 }
5007 bnx2_enable_int(bp);
5008 }
5009 }
5010 if (bp->flags & USING_MSI_FLAG) {
5011 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5014 netif_start_queue(dev);
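/* Editor's note: the tail of bnx2_open() probes MSI at runtime because
 * some chipsets advertise but do not deliver it.  The recovery sequence,
 * condensed from the code above, falls back to INTx:
 *
 *	if (bnx2_test_intr(bp) != 0) {         // no MSI ever arrived
 *		bnx2_disable_int(bp);
 *		bnx2_free_irq(bp);             // drops MSI, clears the flags
 *		rc = bnx2_init_nic(bp);        // re-init for INTx semantics
 *		if (!rc)
 *			rc = bnx2_request_irq(bp); // now uses IRQF_SHARED
 *	}
 */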
5020 bnx2_reset_task(struct work_struct *work)
5022 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5024 if (!netif_running(bp->dev))
5025 return;
5027 bp->in_reset_task = 1;
5028 bnx2_netif_stop(bp);
5030 bnx2_init_nic(bp);
5032 atomic_set(&bp->intr_sem, 1);
5033 bnx2_netif_start(bp);
5034 bp->in_reset_task = 0;
5038 bnx2_tx_timeout(struct net_device *dev)
5040 struct bnx2 *bp = netdev_priv(dev);
5042 /* This allows the netif to be shutdown gracefully before resetting */
5043 schedule_work(&bp->reset_task);
5047 /* Called with rtnl_lock */
5049 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5051 struct bnx2 *bp = netdev_priv(dev);
5053 bnx2_netif_stop(bp);
5055 bp->vlgrp = vlgrp;
5056 bnx2_set_rx_mode(dev);
5058 bnx2_netif_start(bp);
5062 /* Called with netif_tx_lock.
5063 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5064 * netif_wake_queue().
5065 */
5067 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5069 struct bnx2 *bp = netdev_priv(dev);
5070 dma_addr_t mapping;
5071 struct tx_bd *txbd;
5072 struct sw_bd *tx_buf;
5073 u32 len, vlan_tag_flags, last_frag, mss;
5074 u16 prod, ring_prod;
5075 int i;
5077 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5078 netif_stop_queue(dev);
5079 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5080 dev->name);
5082 return NETDEV_TX_BUSY;
5084 len = skb_headlen(skb);
5085 prod = bp->tx_prod;
5086 ring_prod = TX_RING_IDX(prod);
5088 vlan_tag_flags = 0;
5089 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5090 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5093 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5094 vlan_tag_flags |=
5095 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5097 if ((mss = skb_shinfo(skb)->gso_size)) {
5098 u32 tcp_opt_len, ip_tcp_len;
5099 struct iphdr *iph;
5101 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5103 tcp_opt_len = tcp_optlen(skb);
5105 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5106 u32 tcp_off = skb_transport_offset(skb) -
5107 sizeof(struct ipv6hdr) - ETH_HLEN;
5109 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5110 TX_BD_FLAGS_SW_FLAGS;
5111 if (likely(tcp_off == 0))
5112 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5113 else {
5114 tcp_off >>= 3;
5115 vlan_tag_flags |= ((tcp_off & 0x3) <<
5116 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5117 ((tcp_off & 0x10) <<
5118 TX_BD_FLAGS_TCP6_OFF4_SHL);
5119 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5120 }
5122 if (skb_header_cloned(skb) &&
5123 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5124 dev_kfree_skb(skb);
5125 return NETDEV_TX_OK;
5128 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5130 iph = ip_hdr(skb);
5131 iph->check = 0;
5132 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5133 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5134 iph->daddr, 0,
5135 IPPROTO_TCP, 0);
5137 if (tcp_opt_len || (iph->ihl > 5)) {
5138 vlan_tag_flags |= ((iph->ihl - 5) +
5139 (tcp_opt_len >> 2)) << 8;
5145 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5147 tx_buf = &bp->tx_buf_ring[ring_prod];
5148 tx_buf->skb = skb;
5149 pci_unmap_addr_set(tx_buf, mapping, mapping);
5151 txbd = &bp->tx_desc_ring[ring_prod];
5153 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5154 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5155 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5156 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5158 last_frag = skb_shinfo(skb)->nr_frags;
5160 for (i = 0; i < last_frag; i++) {
5161 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5163 prod = NEXT_TX_BD(prod);
5164 ring_prod = TX_RING_IDX(prod);
5165 txbd = &bp->tx_desc_ring[ring_prod];
5167 len = frag->size;
5168 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5169 len, PCI_DMA_TODEVICE);
5170 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5171 mapping, mapping);
5173 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5174 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5175 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5176 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5179 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5181 prod = NEXT_TX_BD(prod);
5182 bp->tx_prod_bseq += skb->len;
5184 REG_WR16(bp, bp->tx_bidx_addr, prod);
5185 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5190 dev->trans_start = jiffies;
5192 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5193 netif_stop_queue(dev);
5194 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5195 netif_wake_queue(dev);
5198 return NETDEV_TX_OK;
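/* Editor's note: the stop/re-check/wake sequence above is the standard
 * lockless guard against racing the completion path.  The queue is
 * stopped when MAX_SKB_FRAGS or fewer descriptors remain, then
 * availability is re-checked because bnx2_tx_int() may have freed
 * descriptors between the check and netif_stop_queue(); if so the queue
 * is woken immediately instead of stalling until the next completion.
 */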
5201 /* Called with rtnl_lock */
5203 bnx2_close(struct net_device *dev)
5205 struct bnx2 *bp = netdev_priv(dev);
5206 u32 reset_code;
5208 /* Calling flush_scheduled_work() may deadlock because
5209 * linkwatch_event() may be on the workqueue and it will try to get
5210 * the rtnl_lock which we are holding.
5211 */
5212 while (bp->in_reset_task)
5213 msleep(1);
5215 bnx2_disable_int_sync(bp);
5216 napi_disable(&bp->napi);
5217 del_timer_sync(&bp->timer);
5218 if (bp->flags & NO_WOL_FLAG)
5219 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5220 else if (bp->wol)
5221 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5222 else
5223 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5224 bnx2_reset_chip(bp, reset_code);
5225 bnx2_free_irq(bp);
5226 bnx2_free_skbs(bp);
5227 bnx2_free_mem(bp);
5228 bp->link_up = 0;
5229 netif_carrier_off(bp->dev);
5230 bnx2_set_power_state(bp, PCI_D3hot);
5234 #define GET_NET_STATS64(ctr) \
5235 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5236 (unsigned long) (ctr##_lo)
5238 #define GET_NET_STATS32(ctr) \
5239 (ctr##_lo)
5241 #if (BITS_PER_LONG == 64)
5242 #define GET_NET_STATS GET_NET_STATS64
5243 #else
5244 #define GET_NET_STATS GET_NET_STATS32
5245 #endif
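/* Editor's note: each hardware counter is a _hi/_lo pair of 32-bit
 * words, so on a 64-bit build
 *
 *	net_stats->rx_bytes = GET_NET_STATS(stats_blk->stat_IfHCInOctets);
 *
 * expands to ((unsigned long) stat_IfHCInOctets_hi << 32) +
 * stat_IfHCInOctets_lo, while the 32-bit variant keeps only the low
 * word and simply truncates the 64-bit counter.
 */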
5247 static struct net_device_stats *
5248 bnx2_get_stats(struct net_device *dev)
5250 struct bnx2 *bp = netdev_priv(dev);
5251 struct statistics_block *stats_blk = bp->stats_blk;
5252 struct net_device_stats *net_stats = &bp->net_stats;
5254 if (bp->stats_blk == NULL) {
5255 return net_stats;
5256 }
5257 net_stats->rx_packets =
5258 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5259 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5260 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5262 net_stats->tx_packets =
5263 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5264 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5265 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5267 net_stats->rx_bytes =
5268 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5270 net_stats->tx_bytes =
5271 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5273 net_stats->multicast =
5274 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5276 net_stats->collisions =
5277 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5279 net_stats->rx_length_errors =
5280 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5281 stats_blk->stat_EtherStatsOverrsizePkts);
5283 net_stats->rx_over_errors =
5284 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5286 net_stats->rx_frame_errors =
5287 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5289 net_stats->rx_crc_errors =
5290 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5292 net_stats->rx_errors = net_stats->rx_length_errors +
5293 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5294 net_stats->rx_crc_errors;
5296 net_stats->tx_aborted_errors =
5297 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5298 stats_blk->stat_Dot3StatsLateCollisions);
5300 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5301 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5302 net_stats->tx_carrier_errors = 0;
5304 net_stats->tx_carrier_errors =
5306 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5309 net_stats->tx_errors =
5310 (unsigned long)
5311 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5312 +
5313 net_stats->tx_aborted_errors +
5314 net_stats->tx_carrier_errors;
5316 net_stats->rx_missed_errors =
5317 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5318 stats_blk->stat_FwRxDrop);
5323 /* All ethtool functions called with rtnl_lock */
5326 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5328 struct bnx2 *bp = netdev_priv(dev);
5329 int support_serdes = 0, support_copper = 0;
5331 cmd->supported = SUPPORTED_Autoneg;
5332 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5333 support_serdes = 1;
5334 support_copper = 1;
5335 } else if (bp->phy_port == PORT_FIBRE)
5336 support_serdes = 1;
5337 else
5338 support_copper = 1;
5340 if (support_serdes) {
5341 cmd->supported |= SUPPORTED_1000baseT_Full |
5342 SUPPORTED_FIBRE;
5343 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5344 cmd->supported |= SUPPORTED_2500baseX_Full;
5347 if (support_copper) {
5348 cmd->supported |= SUPPORTED_10baseT_Half |
5349 SUPPORTED_10baseT_Full |
5350 SUPPORTED_100baseT_Half |
5351 SUPPORTED_100baseT_Full |
5352 SUPPORTED_1000baseT_Full |
5353 SUPPORTED_TP;
5357 spin_lock_bh(&bp->phy_lock);
5358 cmd->port = bp->phy_port;
5359 cmd->advertising = bp->advertising;
5361 if (bp->autoneg & AUTONEG_SPEED) {
5362 cmd->autoneg = AUTONEG_ENABLE;
5363 }
5364 else {
5365 cmd->autoneg = AUTONEG_DISABLE;
5368 if (netif_carrier_ok(dev)) {
5369 cmd->speed = bp->line_speed;
5370 cmd->duplex = bp->duplex;
5371 }
5372 else {
5373 cmd->speed = -1;
5374 cmd->duplex = -1;
5375 }
5376 spin_unlock_bh(&bp->phy_lock);
5378 cmd->transceiver = XCVR_INTERNAL;
5379 cmd->phy_address = bp->phy_addr;
5385 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5387 struct bnx2 *bp = netdev_priv(dev);
5388 u8 autoneg = bp->autoneg;
5389 u8 req_duplex = bp->req_duplex;
5390 u16 req_line_speed = bp->req_line_speed;
5391 u32 advertising = bp->advertising;
5392 int err = -EINVAL;
5394 spin_lock_bh(&bp->phy_lock);
5396 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5397 goto err_out_unlock;
5399 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5400 goto err_out_unlock;
5402 if (cmd->autoneg == AUTONEG_ENABLE) {
5403 autoneg |= AUTONEG_SPEED;
5405 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5407 /* allow advertising 1 speed */
5408 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5409 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5410 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5411 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5413 if (cmd->port == PORT_FIBRE)
5414 goto err_out_unlock;
5416 advertising = cmd->advertising;
5418 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5419 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5420 (cmd->port == PORT_TP))
5421 goto err_out_unlock;
5422 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5423 advertising = cmd->advertising;
5424 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5425 goto err_out_unlock;
5427 if (cmd->port == PORT_FIBRE)
5428 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5429 else
5430 advertising = ETHTOOL_ALL_COPPER_SPEED;
5432 advertising |= ADVERTISED_Autoneg;
5433 }
5434 else {
5435 if (cmd->port == PORT_FIBRE) {
5436 if ((cmd->speed != SPEED_1000 &&
5437 cmd->speed != SPEED_2500) ||
5438 (cmd->duplex != DUPLEX_FULL))
5439 goto err_out_unlock;
5441 if (cmd->speed == SPEED_2500 &&
5442 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5443 goto err_out_unlock;
5444 }
5445 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5446 goto err_out_unlock;
5448 autoneg &= ~AUTONEG_SPEED;
5449 req_line_speed = cmd->speed;
5450 req_duplex = cmd->duplex;
5454 bp->autoneg = autoneg;
5455 bp->advertising = advertising;
5456 bp->req_line_speed = req_line_speed;
5457 bp->req_duplex = req_duplex;
5459 err = bnx2_setup_phy(bp, cmd->port);
5461 err_out_unlock:
5462 spin_unlock_bh(&bp->phy_lock);
5464 return err;
5468 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5470 struct bnx2 *bp = netdev_priv(dev);
5472 strcpy(info->driver, DRV_MODULE_NAME);
5473 strcpy(info->version, DRV_MODULE_VERSION);
5474 strcpy(info->bus_info, pci_name(bp->pdev));
5475 strcpy(info->fw_version, bp->fw_version);
5478 #define BNX2_REGDUMP_LEN (32 * 1024)
5481 bnx2_get_regs_len(struct net_device *dev)
5483 return BNX2_REGDUMP_LEN;
5487 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5489 u32 *p = _p, i, offset;
5490 u8 *orig_p = _p;
5491 struct bnx2 *bp = netdev_priv(dev);
5492 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5493 0x0800, 0x0880, 0x0c00, 0x0c10,
5494 0x0c30, 0x0d08, 0x1000, 0x101c,
5495 0x1040, 0x1048, 0x1080, 0x10a4,
5496 0x1400, 0x1490, 0x1498, 0x14f0,
5497 0x1500, 0x155c, 0x1580, 0x15dc,
5498 0x1600, 0x1658, 0x1680, 0x16d8,
5499 0x1800, 0x1820, 0x1840, 0x1854,
5500 0x1880, 0x1894, 0x1900, 0x1984,
5501 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5502 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5503 0x2000, 0x2030, 0x23c0, 0x2400,
5504 0x2800, 0x2820, 0x2830, 0x2850,
5505 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5506 0x3c00, 0x3c94, 0x4000, 0x4010,
5507 0x4080, 0x4090, 0x43c0, 0x4458,
5508 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5509 0x4fc0, 0x5010, 0x53c0, 0x5444,
5510 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5511 0x5fc0, 0x6000, 0x6400, 0x6428,
5512 0x6800, 0x6848, 0x684c, 0x6860,
5513 0x6888, 0x6910, 0x8000 };
5517 memset(p, 0, BNX2_REGDUMP_LEN);
5519 if (!netif_running(bp->dev))
5520 return;
5522 i = 0;
5523 offset = reg_boundaries[0];
5525 while (offset < BNX2_REGDUMP_LEN) {
5526 *p++ = REG_RD(bp, offset);
5527 offset += 4;
5528 if (offset == reg_boundaries[i + 1]) {
5529 offset = reg_boundaries[i + 2];
5530 p = (u32 *) (orig_p + offset);
5531 i += 2;
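/* Editor's note: reg_boundaries[] holds consecutive [start, end) pairs
 * of readable register ranges.  The loop above copies words until offset
 * reaches the end of the current pair, then jumps both offset and the
 * output pointer to the start of the next pair; the memset() at entry
 * leaves the skipped holes zeroed, so the dump is always a flat
 * BNX2_REGDUMP_LEN image with registers at their natural offsets.
 */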
5537 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5539 struct bnx2 *bp = netdev_priv(dev);
5541 if (bp->flags & NO_WOL_FLAG) {
5542 wol->supported = 0;
5543 wol->wolopts = 0;
5544 }
5545 else {
5546 wol->supported = WAKE_MAGIC;
5547 if (bp->wol)
5548 wol->wolopts = WAKE_MAGIC;
5549 else
5550 wol->wolopts = 0;
5551 }
5552 memset(&wol->sopass, 0, sizeof(wol->sopass));
5556 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5558 struct bnx2 *bp = netdev_priv(dev);
5560 if (wol->wolopts & ~WAKE_MAGIC)
5561 return -EINVAL;
5563 if (wol->wolopts & WAKE_MAGIC) {
5564 if (bp->flags & NO_WOL_FLAG)
5565 return -EINVAL;
5576 bnx2_nway_reset(struct net_device *dev)
5578 struct bnx2 *bp = netdev_priv(dev);
5581 if (!(bp->autoneg & AUTONEG_SPEED)) {
5582 return -EINVAL;
5583 }
5585 spin_lock_bh(&bp->phy_lock);
5587 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5588 int rc;
5590 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5591 spin_unlock_bh(&bp->phy_lock);
5592 return rc;
5593 }
5595 /* Force a link down visible on the other side */
5596 if (bp->phy_flags & PHY_SERDES_FLAG) {
5597 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5598 spin_unlock_bh(&bp->phy_lock);
5599 msleep(20);
5602 spin_lock_bh(&bp->phy_lock);
5604 bp->current_interval = SERDES_AN_TIMEOUT;
5605 bp->serdes_an_pending = 1;
5606 mod_timer(&bp->timer, jiffies + bp->current_interval);
5609 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5610 bmcr &= ~BMCR_LOOPBACK;
5611 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5613 spin_unlock_bh(&bp->phy_lock);
5619 bnx2_get_eeprom_len(struct net_device *dev)
5621 struct bnx2 *bp = netdev_priv(dev);
5623 if (bp->flash_info == NULL)
5624 return 0;
5626 return (int) bp->flash_size;
5630 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5633 struct bnx2 *bp = netdev_priv(dev);
5636 /* parameters already validated in ethtool_get_eeprom */
5638 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5644 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5647 struct bnx2 *bp = netdev_priv(dev);
5650 /* parameters already validated in ethtool_set_eeprom */
5652 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5658 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5660 struct bnx2 *bp = netdev_priv(dev);
5662 memset(coal, 0, sizeof(struct ethtool_coalesce));
5664 coal->rx_coalesce_usecs = bp->rx_ticks;
5665 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5666 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5667 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5669 coal->tx_coalesce_usecs = bp->tx_ticks;
5670 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5671 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5672 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5674 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5680 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5682 struct bnx2 *bp = netdev_priv(dev);
5684 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5685 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5687 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5688 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5690 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5691 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5693 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5694 if (bp->rx_quick_cons_trip_int > 0xff)
5695 bp->rx_quick_cons_trip_int = 0xff;
5697 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5698 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5700 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5701 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5703 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5704 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5706 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5707 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5708 0xff;
5710 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5711 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5712 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5713 bp->stats_ticks = USEC_PER_SEC;
5715 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5716 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5717 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5719 if (netif_running(bp->dev)) {
5720 bnx2_netif_stop(bp);
5721 bnx2_init_nic(bp);
5722 bnx2_netif_start(bp);
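/* Editor's note: the clamps above keep each parameter inside its
 * hardware field; the tick values are limited to 0x3ff and the frame
 * counts to 0xff, and stats_ticks is masked with
 * BNX2_HC_STATS_TICKS_HC_STAT_TICKS because the host-coalescing block
 * only implements those bits.  Out-of-range ethtool -C requests are
 * therefore silently rounded down rather than rejected.
 */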
5729 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5731 struct bnx2 *bp = netdev_priv(dev);
5733 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5734 ering->rx_mini_max_pending = 0;
5735 ering->rx_jumbo_max_pending = 0;
5737 ering->rx_pending = bp->rx_ring_size;
5738 ering->rx_mini_pending = 0;
5739 ering->rx_jumbo_pending = 0;
5741 ering->tx_max_pending = MAX_TX_DESC_CNT;
5742 ering->tx_pending = bp->tx_ring_size;
5746 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5748 struct bnx2 *bp = netdev_priv(dev);
5750 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5751 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5752 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5753 return -EINVAL;
5754 }
5756 if (netif_running(bp->dev)) {
5757 bnx2_netif_stop(bp);
5758 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5759 bnx2_free_skbs(bp);
5760 bnx2_free_mem(bp);
5761 }
5763 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5764 bp->tx_ring_size = ering->tx_pending;
5766 if (netif_running(bp->dev)) {
5769 rc = bnx2_alloc_mem(bp);
5770 if (rc)
5771 return rc;
5772 bnx2_init_nic(bp);
5773 bnx2_netif_start(bp);
5780 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5782 struct bnx2 *bp = netdev_priv(dev);
5784 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5785 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5786 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5790 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5792 struct bnx2 *bp = netdev_priv(dev);
5794 bp->req_flow_ctrl = 0;
5795 if (epause->rx_pause)
5796 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5797 if (epause->tx_pause)
5798 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5800 if (epause->autoneg) {
5801 bp->autoneg |= AUTONEG_FLOW_CTRL;
5804 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5807 spin_lock_bh(&bp->phy_lock);
5809 bnx2_setup_phy(bp, bp->phy_port);
5811 spin_unlock_bh(&bp->phy_lock);
5817 bnx2_get_rx_csum(struct net_device *dev)
5819 struct bnx2 *bp = netdev_priv(dev);
5825 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5827 struct bnx2 *bp = netdev_priv(dev);
5834 bnx2_set_tso(struct net_device *dev, u32 data)
5836 struct bnx2 *bp = netdev_priv(dev);
5839 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5840 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5841 dev->features |= NETIF_F_TSO6;
5843 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5848 #define BNX2_NUM_STATS 46
5851 char string[ETH_GSTRING_LEN];
5852 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5854 { "rx_error_bytes" },
5856 { "tx_error_bytes" },
5857 { "rx_ucast_packets" },
5858 { "rx_mcast_packets" },
5859 { "rx_bcast_packets" },
5860 { "tx_ucast_packets" },
5861 { "tx_mcast_packets" },
5862 { "tx_bcast_packets" },
5863 { "tx_mac_errors" },
5864 { "tx_carrier_errors" },
5865 { "rx_crc_errors" },
5866 { "rx_align_errors" },
5867 { "tx_single_collisions" },
5868 { "tx_multi_collisions" },
5870 { "tx_excess_collisions" },
5871 { "tx_late_collisions" },
5872 { "tx_total_collisions" },
5875 { "rx_undersize_packets" },
5876 { "rx_oversize_packets" },
5877 { "rx_64_byte_packets" },
5878 { "rx_65_to_127_byte_packets" },
5879 { "rx_128_to_255_byte_packets" },
5880 { "rx_256_to_511_byte_packets" },
5881 { "rx_512_to_1023_byte_packets" },
5882 { "rx_1024_to_1522_byte_packets" },
5883 { "rx_1523_to_9022_byte_packets" },
5884 { "tx_64_byte_packets" },
5885 { "tx_65_to_127_byte_packets" },
5886 { "tx_128_to_255_byte_packets" },
5887 { "tx_256_to_511_byte_packets" },
5888 { "tx_512_to_1023_byte_packets" },
5889 { "tx_1024_to_1522_byte_packets" },
5890 { "tx_1523_to_9022_byte_packets" },
5891 { "rx_xon_frames" },
5892 { "rx_xoff_frames" },
5893 { "tx_xon_frames" },
5894 { "tx_xoff_frames" },
5895 { "rx_mac_ctrl_frames" },
5896 { "rx_filtered_packets" },
5898 { "rx_fw_discards" },
5901 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5903 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5904 STATS_OFFSET32(stat_IfHCInOctets_hi),
5905 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5906 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5907 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5908 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5909 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5910 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5911 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5912 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5913 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5914 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5915 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5916 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5917 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5918 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5919 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5920 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5921 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5922 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5923 STATS_OFFSET32(stat_EtherStatsCollisions),
5924 STATS_OFFSET32(stat_EtherStatsFragments),
5925 STATS_OFFSET32(stat_EtherStatsJabbers),
5926 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5927 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5928 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5929 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5930 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5931 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5932 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5933 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5934 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5935 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5936 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5937 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5938 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5939 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5940 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5941 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5942 STATS_OFFSET32(stat_XonPauseFramesReceived),
5943 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5944 STATS_OFFSET32(stat_OutXonSent),
5945 STATS_OFFSET32(stat_OutXoffSent),
5946 STATS_OFFSET32(stat_MacControlFramesReceived),
5947 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5948 STATS_OFFSET32(stat_IfInMBUFDiscards),
5949 STATS_OFFSET32(stat_FwRxDrop),
5952 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5953 * skipped because of errata.
5955 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5956 8,0,8,8,8,8,8,8,8,8,
5957 4,0,4,4,4,4,4,4,4,4,
5958 4,4,4,4,4,4,4,4,4,4,
5959 4,4,4,4,4,4,4,4,4,4,
5960 4,4,4,4,4,4,
5961 };
5963 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5964 8,0,8,8,8,8,8,8,8,8,
5965 4,4,4,4,4,4,4,4,4,4,
5966 4,4,4,4,4,4,4,4,4,4,
5967 4,4,4,4,4,4,4,4,4,4,
5968 4,4,4,4,4,4,
5969 };
5971 #define BNX2_NUM_TESTS 6
5974 char string[ETH_GSTRING_LEN];
5975 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5976 { "register_test (offline)" },
5977 { "memory_test (offline)" },
5978 { "loopback_test (offline)" },
5979 { "nvram_test (online)" },
5980 { "interrupt_test (online)" },
5981 { "link_test (online)" },
5985 bnx2_get_sset_count(struct net_device *dev, int sset)
5987 switch (sset) {
5988 case ETH_SS_TEST:
5989 return BNX2_NUM_TESTS;
5990 case ETH_SS_STATS:
5991 return BNX2_NUM_STATS;
5992 default:
5993 return -EOPNOTSUPP;
5994 }
5998 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6000 struct bnx2 *bp = netdev_priv(dev);
6002 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6003 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6006 bnx2_netif_stop(bp);
6007 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6010 if (bnx2_test_registers(bp) != 0) {
6011 buf[0] = 1;
6012 etest->flags |= ETH_TEST_FL_FAILED;
6014 if (bnx2_test_memory(bp) != 0) {
6015 buf[1] = 1;
6016 etest->flags |= ETH_TEST_FL_FAILED;
6018 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6019 etest->flags |= ETH_TEST_FL_FAILED;
6021 if (!netif_running(bp->dev)) {
6022 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6023 }
6024 else {
6025 bnx2_init_nic(bp);
6026 bnx2_netif_start(bp);
6029 /* wait for link up */
6030 for (i = 0; i < 7; i++) {
6031 if (bp->link_up)
6032 break;
6033 msleep_interruptible(1000);
6037 if (bnx2_test_nvram(bp) != 0) {
6038 buf[3] = 1;
6039 etest->flags |= ETH_TEST_FL_FAILED;
6041 if (bnx2_test_intr(bp) != 0) {
6042 buf[4] = 1;
6043 etest->flags |= ETH_TEST_FL_FAILED;
6046 if (bnx2_test_link(bp) != 0) {
6047 buf[5] = 1;
6048 etest->flags |= ETH_TEST_FL_FAILED;
6054 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6056 switch (stringset) {
6057 case ETH_SS_STATS:
6058 memcpy(buf, bnx2_stats_str_arr,
6059 sizeof(bnx2_stats_str_arr));
6060 break;
6061 case ETH_SS_TEST:
6062 memcpy(buf, bnx2_tests_str_arr,
6063 sizeof(bnx2_tests_str_arr));
6069 bnx2_get_ethtool_stats(struct net_device *dev,
6070 struct ethtool_stats *stats, u64 *buf)
6072 struct bnx2 *bp = netdev_priv(dev);
6073 int i;
6074 u32 *hw_stats = (u32 *) bp->stats_blk;
6075 u8 *stats_len_arr = NULL;
6077 if (hw_stats == NULL) {
6078 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6079 return;
6080 }
6082 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6083 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6084 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6085 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6086 stats_len_arr = bnx2_5706_stats_len_arr;
6088 stats_len_arr = bnx2_5708_stats_len_arr;
6090 for (i = 0; i < BNX2_NUM_STATS; i++) {
6091 if (stats_len_arr[i] == 0) {
6092 /* skip this counter */
6093 buf[i] = 0;
6094 continue;
6095 }
6096 if (stats_len_arr[i] == 4) {
6097 /* 4-byte counter */
6098 buf[i] = (u64)
6099 *(hw_stats + bnx2_stats_offset_arr[i]);
6100 continue;
6101 }
6102 /* 8-byte counter */
6103 buf[i] = (((u64) *(hw_stats +
6104 bnx2_stats_offset_arr[i])) << 32) +
6105 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
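/* Editor's note: the per-chip length arrays drive this loop: a 0 entry
 * hides a counter that is unreliable on that chip (see the errata note
 * before bnx2_5706_stats_len_arr), a 4 copies one 32-bit word, and an 8
 * merges the pair as ((u64) hi << 32) + lo, matching the statistics
 * block layout in which the high word precedes the low word.
 */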
6110 bnx2_phys_id(struct net_device *dev, u32 data)
6112 struct bnx2 *bp = netdev_priv(dev);
6119 save = REG_RD(bp, BNX2_MISC_CFG);
6120 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6122 for (i = 0; i < (data * 2); i++) {
6123 if ((i % 2) == 0)
6124 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6126 else
6127 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6128 BNX2_EMAC_LED_1000MB_OVERRIDE |
6129 BNX2_EMAC_LED_100MB_OVERRIDE |
6130 BNX2_EMAC_LED_10MB_OVERRIDE |
6131 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6132 BNX2_EMAC_LED_TRAFFIC);
6134 msleep_interruptible(500);
6135 if (signal_pending(current))
6136 break;
6138 REG_WR(bp, BNX2_EMAC_LED, 0);
6139 REG_WR(bp, BNX2_MISC_CFG, save);
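/* Editor's note: this backs "ethtool -p ethX <N>".  Each pass through
 * the loop holds the LED state for 500 ms and there are N * 2 passes,
 * so the port blinks for roughly N seconds before BNX2_MISC_CFG is
 * restored; msleep_interruptible() plus the signal check let the user
 * interrupt the blinking early.
 */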
6144 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6146 struct bnx2 *bp = netdev_priv(dev);
6148 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6149 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6151 return (ethtool_op_set_tx_csum(dev, data));
6154 static const struct ethtool_ops bnx2_ethtool_ops = {
6155 .get_settings = bnx2_get_settings,
6156 .set_settings = bnx2_set_settings,
6157 .get_drvinfo = bnx2_get_drvinfo,
6158 .get_regs_len = bnx2_get_regs_len,
6159 .get_regs = bnx2_get_regs,
6160 .get_wol = bnx2_get_wol,
6161 .set_wol = bnx2_set_wol,
6162 .nway_reset = bnx2_nway_reset,
6163 .get_link = ethtool_op_get_link,
6164 .get_eeprom_len = bnx2_get_eeprom_len,
6165 .get_eeprom = bnx2_get_eeprom,
6166 .set_eeprom = bnx2_set_eeprom,
6167 .get_coalesce = bnx2_get_coalesce,
6168 .set_coalesce = bnx2_set_coalesce,
6169 .get_ringparam = bnx2_get_ringparam,
6170 .set_ringparam = bnx2_set_ringparam,
6171 .get_pauseparam = bnx2_get_pauseparam,
6172 .set_pauseparam = bnx2_set_pauseparam,
6173 .get_rx_csum = bnx2_get_rx_csum,
6174 .set_rx_csum = bnx2_set_rx_csum,
6175 .set_tx_csum = bnx2_set_tx_csum,
6176 .set_sg = ethtool_op_set_sg,
6177 .set_tso = bnx2_set_tso,
6178 .self_test = bnx2_self_test,
6179 .get_strings = bnx2_get_strings,
6180 .phys_id = bnx2_phys_id,
6181 .get_ethtool_stats = bnx2_get_ethtool_stats,
6182 .get_sset_count = bnx2_get_sset_count,
6185 /* Called with rtnl_lock */
6187 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6189 struct mii_ioctl_data *data = if_mii(ifr);
6190 struct bnx2 *bp = netdev_priv(dev);
6191 int err;
6193 switch (cmd) {
6194 case SIOCGMIIPHY:
6195 data->phy_id = bp->phy_addr;
6197 /* fallthru */
6198 case SIOCGMIIREG: {
6199 u32 mii_regval;
6201 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6202 return -EOPNOTSUPP;
6204 if (!netif_running(dev))
6205 return -EAGAIN;
6207 spin_lock_bh(&bp->phy_lock);
6208 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6209 spin_unlock_bh(&bp->phy_lock);
6211 data->val_out = mii_regval;
6213 return err;
6214 }
6216 case SIOCSMIIREG:
6217 if (!capable(CAP_NET_ADMIN))
6218 return -EPERM;
6220 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6221 return -EOPNOTSUPP;
6223 if (!netif_running(dev))
6224 return -EAGAIN;
6226 spin_lock_bh(&bp->phy_lock);
6227 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6228 spin_unlock_bh(&bp->phy_lock);
6230 return err;
6232 default:
6233 /* do nothing */
6234 break;
6235 }
6236 return 0;
6239 /* Called with rtnl_lock */
6241 bnx2_change_mac_addr(struct net_device *dev, void *p)
6243 struct sockaddr *addr = p;
6244 struct bnx2 *bp = netdev_priv(dev);
6246 if (!is_valid_ether_addr(addr->sa_data))
6247 return -EINVAL;
6249 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6250 if (netif_running(dev))
6251 bnx2_set_mac_addr(bp);
6256 /* Called with rtnl_lock */
6258 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6260 struct bnx2 *bp = netdev_priv(dev);
6262 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6263 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6264 return -EINVAL;
6266 dev->mtu = new_mtu;
6267 if (netif_running(dev)) {
6268 bnx2_netif_stop(bp);
6270 bnx2_init_nic(bp);
6272 bnx2_netif_start(bp);
6277 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6279 poll_bnx2(struct net_device *dev)
6281 struct bnx2 *bp = netdev_priv(dev);
6283 disable_irq(bp->pdev->irq);
6284 bnx2_interrupt(bp->pdev->irq, dev);
6285 enable_irq(bp->pdev->irq);
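/* Editor's note: this is the conventional netpoll hook, used by
 * netconsole and kgdboe when normal interrupt delivery cannot be relied
 * on.  Disabling the IRQ line and invoking the ISR synchronously
 * (disable_irq(); bnx2_interrupt(); enable_irq();) lets the driver drain
 * its rings from contexts where interrupts are off.
 */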
6289 static void __devinit
6290 bnx2_get_5709_media(struct bnx2 *bp)
6292 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6293 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6296 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6297 return;
6298 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6299 bp->phy_flags |= PHY_SERDES_FLAG;
6300 return;
6301 }
6303 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6304 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6306 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6308 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6313 bp->phy_flags |= PHY_SERDES_FLAG;
6321 bp->phy_flags |= PHY_SERDES_FLAG;
6327 static void __devinit
6328 bnx2_get_pci_speed(struct bnx2 *bp)
6332 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6333 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6334 u32 clkreg;
6336 bp->flags |= PCIX_FLAG;
6338 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6340 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6341 switch (clkreg) {
6342 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6343 bp->bus_speed_mhz = 133;
6344 break;
6346 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6347 bp->bus_speed_mhz = 100;
6348 break;
6350 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6351 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6352 bp->bus_speed_mhz = 66;
6353 break;
6355 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6356 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6357 bp->bus_speed_mhz = 50;
6358 break;
6360 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6361 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6362 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6363 bp->bus_speed_mhz = 33;
6364 break;
6365 }
6368 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6369 bp->bus_speed_mhz = 66;
6370 else
6371 bp->bus_speed_mhz = 33;
6374 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6375 bp->flags |= PCI_32BIT_FLAG;
6379 static int __devinit
6380 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6383 unsigned long mem_len;
6386 u64 dma_mask, persist_dma_mask;
6388 SET_NETDEV_DEV(dev, &pdev->dev);
6389 bp = netdev_priv(dev);
6394 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6395 rc = pci_enable_device(pdev);
6397 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6401 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6403 "Cannot find PCI device base address, aborting.\n");
6404 rc = -ENODEV;
6405 goto err_out_disable;
6408 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6410 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6411 goto err_out_disable;
6414 pci_set_master(pdev);
6416 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6417 if (bp->pm_cap == 0) {
6419 "Cannot find power management capability, aborting.\n");
6420 rc = -EIO;
6421 goto err_out_release;
6423 bp->dev = dev;
6424 bp->pdev = pdev;
6427 spin_lock_init(&bp->phy_lock);
6428 spin_lock_init(&bp->indirect_lock);
6429 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6431 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6432 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6433 dev->mem_end = dev->mem_start + mem_len;
6434 dev->irq = pdev->irq;
6436 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6438 if (!bp->regview) {
6439 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6440 rc = -ENOMEM;
6441 goto err_out_release;
6444 /* Configure byte swap and enable write to the reg_window registers.
6445 * Rely on CPU to do target byte swapping on big endian systems
6446 * The chip's target access swapping will not swap all accesses
6448 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6449 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6450 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6452 bnx2_set_power_state(bp, PCI_D0);
6454 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6456 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6457 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6459 "Cannot find PCIE capability, aborting.\n");
6463 bp->flags |= PCIE_FLAG;
6465 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6466 if (bp->pcix_cap == 0) {
6468 "Cannot find PCIX capability, aborting.\n");
6474 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6475 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6476 bp->flags |= MSI_CAP_FLAG;
6479 /* 5708 cannot support DMA addresses > 40-bit. */
6480 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6481 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6483 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6485 /* Configure DMA attributes. */
6486 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6487 dev->features |= NETIF_F_HIGHDMA;
6488 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6491 "pci_set_consistent_dma_mask failed, aborting.\n");
6494 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6495 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6496 goto err_out_unmap;
6497 }
6499 if (!(bp->flags & PCIE_FLAG))
6500 bnx2_get_pci_speed(bp);
6502 /* 5706A0 may falsely detect SERR and PERR. */
6503 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6504 reg = REG_RD(bp, PCI_COMMAND);
6505 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6506 REG_WR(bp, PCI_COMMAND, reg);
6508 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6509 !(bp->flags & PCIX_FLAG)) {
6512 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6516 bnx2_init_nvram(bp);
6518 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6520 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6521 BNX2_SHM_HDR_SIGNATURE_SIG) {
6522 u32 off = PCI_FUNC(pdev->devfn) << 2;
6524 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6526 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6528 /* Get the permanent MAC address. First we need to make sure the
6529 * firmware is actually running.
6531 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6533 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6534 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6535 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6540 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6541 for (i = 0, j = 0; i < 3; i++) {
6542 u8 num, k, skip0;
6544 num = (u8) (reg >> (24 - (i * 8)));
6545 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6546 if (num >= k || !skip0 || k == 1) {
6547 bp->fw_version[j++] = (num / k) + '0';
6548 skip0 = 0;
6549 }
6550 }
6551 if (i != 2)
6552 bp->fw_version[j++] = '.';
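/* Editor's note: a worked example of the digit loop above.  For a
 * BNX2_DEV_INFO_BC_REV word of 0x01020300 the three high bytes are 1, 2
 * and 3; with k stepping 100 -> 10 -> 1 and skip0 suppressing leading
 * zeros, each byte contributes only its significant digits, so
 * bp->fw_version becomes "1.2.3".
 */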
6554 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
6555 BNX2_PORT_FEATURE_ASF_ENABLED) {
6556 bp->flags |= ASF_ENABLE_FLAG;
6558 for (i = 0; i < 30; i++) {
6559 reg = REG_RD_IND(bp, bp->shmem_base +
6560 BNX2_BC_STATE_CONDITION);
6561 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6562 break;
6563 msleep(10);
6564 }
6566 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6567 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6568 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6569 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6571 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6573 bp->fw_version[j++] = ' ';
6574 for (i = 0; i < 3; i++) {
6575 reg = REG_RD_IND(bp, addr + i * 4);
6576 reg = swab32(reg);
6577 memcpy(&bp->fw_version[j], &reg, 4);
6578 j += 4;
6582 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6583 bp->mac_addr[0] = (u8) (reg >> 8);
6584 bp->mac_addr[1] = (u8) reg;
6586 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6587 bp->mac_addr[2] = (u8) (reg >> 24);
6588 bp->mac_addr[3] = (u8) (reg >> 16);
6589 bp->mac_addr[4] = (u8) (reg >> 8);
6590 bp->mac_addr[5] = (u8) reg;
6592 bp->tx_ring_size = MAX_TX_DESC_CNT;
6593 bnx2_set_rx_ring_size(bp, 255);
6597 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6599 bp->tx_quick_cons_trip_int = 20;
6600 bp->tx_quick_cons_trip = 20;
6601 bp->tx_ticks_int = 80;
6604 bp->rx_quick_cons_trip_int = 6;
6605 bp->rx_quick_cons_trip = 6;
6606 bp->rx_ticks_int = 18;
6609 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6611 bp->timer_interval = HZ;
6612 bp->current_interval = HZ;
6616 /* Disable WOL support if we are running on a SERDES chip. */
6617 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6618 bnx2_get_5709_media(bp);
6619 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6620 bp->phy_flags |= PHY_SERDES_FLAG;
6622 bp->phy_port = PORT_TP;
6623 if (bp->phy_flags & PHY_SERDES_FLAG) {
6624 bp->phy_port = PORT_FIBRE;
6625 bp->flags |= NO_WOL_FLAG;
6626 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6628 reg = REG_RD_IND(bp, bp->shmem_base +
6629 BNX2_SHARED_HW_CFG_CONFIG);
6630 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6631 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6633 bnx2_init_remote_phy(bp);
6635 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6636 CHIP_NUM(bp) == CHIP_NUM_5708)
6637 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6638 else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
6639 CHIP_ID(bp) == CHIP_ID_5709_A1)
6640 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6642 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6643 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6644 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6645 bp->flags |= NO_WOL_FLAG;
6647 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6648 bp->tx_quick_cons_trip_int =
6649 bp->tx_quick_cons_trip;
6650 bp->tx_ticks_int = bp->tx_ticks;
6651 bp->rx_quick_cons_trip_int =
6652 bp->rx_quick_cons_trip;
6653 bp->rx_ticks_int = bp->rx_ticks;
6654 bp->comp_prod_trip_int = bp->comp_prod_trip;
6655 bp->com_ticks_int = bp->com_ticks;
6656 bp->cmd_ticks_int = bp->cmd_ticks;
6659 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6661 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6662 * with byte enables disabled on the unused 32-bit word. This is legal
6663 * but causes problems on the AMD 8132 which will eventually stop
6664 * responding after a while.
6666 * AMD believes this incompatibility is unique to the 5706, and
6667 * prefers to locally disable MSI rather than globally disabling it.
6669 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6670 struct pci_dev *amd_8132 = NULL;
6672 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6673 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6674 amd_8132))) {
6676 if (amd_8132->revision >= 0x10 &&
6677 amd_8132->revision <= 0x13) {
6678 disable_msi = 1;
6679 pci_dev_put(amd_8132);
6680 break;
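/* Editor's note: pci_get_device() is a reference-counted iterator: it
 * releases the device passed in and returns the next match with its
 * refcount raised, so a search that runs to the end needs no cleanup,
 * but breaking out early (as above, once one affected 8132 revision is
 * found) must pci_dev_put() the device it still holds.
 */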
6685 bnx2_set_default_link(bp);
6686 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6688 init_timer(&bp->timer);
6689 bp->timer.expires = RUN_AT(bp->timer_interval);
6690 bp->timer.data = (unsigned long) bp;
6691 bp->timer.function = bnx2_timer;
6693 return 0;
6695 err_out_unmap:
6696 if (bp->regview) {
6697 iounmap(bp->regview);
6698 bp->regview = NULL;
6699 }
6701 err_out_release:
6702 pci_release_regions(pdev);
6704 err_out_disable:
6705 pci_disable_device(pdev);
6706 pci_set_drvdata(pdev, NULL);
6708 err_out:
6709 return rc;
6712 static char * __devinit
6713 bnx2_bus_string(struct bnx2 *bp, char *str)
6715 char *s = str;
6717 if (bp->flags & PCIE_FLAG) {
6718 s += sprintf(s, "PCI Express");
6720 s += sprintf(s, "PCI");
6721 if (bp->flags & PCIX_FLAG)
6722 s += sprintf(s, "-X");
6723 if (bp->flags & PCI_32BIT_FLAG)
6724 s += sprintf(s, " 32-bit");
6726 s += sprintf(s, " 64-bit");
6727 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
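/* Editor's note: bnx2_bus_string() composes strings such as
 * "PCI Express", "PCI 64-bit 66MHz" or "PCI-X 64-bit 133MHz"; the probe
 * printk below embeds the result in the one-line summary logged for
 * each adapter when the driver binds to it.
 */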
6732 static int __devinit
6733 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6735 static int version_printed = 0;
6736 struct net_device *dev = NULL;
6740 DECLARE_MAC_BUF(mac);
6742 if (version_printed++ == 0)
6743 printk(KERN_INFO "%s", version);
6745 /* dev zeroed in init_etherdev */
6746 dev = alloc_etherdev(sizeof(*bp));
6748 if (!dev)
6749 return -ENOMEM;
6751 rc = bnx2_init_board(pdev, dev);
6752 if (rc < 0) {
6753 free_netdev(dev);
6754 return rc;
6755 }
6757 dev->open = bnx2_open;
6758 dev->hard_start_xmit = bnx2_start_xmit;
6759 dev->stop = bnx2_close;
6760 dev->get_stats = bnx2_get_stats;
6761 dev->set_multicast_list = bnx2_set_rx_mode;
6762 dev->do_ioctl = bnx2_ioctl;
6763 dev->set_mac_address = bnx2_change_mac_addr;
6764 dev->change_mtu = bnx2_change_mtu;
6765 dev->tx_timeout = bnx2_tx_timeout;
6766 dev->watchdog_timeo = TX_TIMEOUT;
6767 #ifdef BCM_VLAN
6768 dev->vlan_rx_register = bnx2_vlan_rx_register;
6769 #endif
6770 dev->ethtool_ops = &bnx2_ethtool_ops;
6772 bp = netdev_priv(dev);
6773 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6775 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6776 dev->poll_controller = poll_bnx2;
6779 pci_set_drvdata(pdev, dev);
6781 memcpy(dev->dev_addr, bp->mac_addr, 6);
6782 memcpy(dev->perm_addr, bp->mac_addr, 6);
6783 bp->name = board_info[ent->driver_data].name;
6785 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6786 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6787 dev->features |= NETIF_F_IPV6_CSUM;
6789 #ifdef BCM_VLAN
6790 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6791 #endif
6792 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6793 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6794 dev->features |= NETIF_F_TSO6;
6796 if ((rc = register_netdev(dev))) {
6797 dev_err(&pdev->dev, "Cannot register net device\n");
6798 if (bp->regview)
6799 iounmap(bp->regview);
6800 pci_release_regions(pdev);
6801 pci_disable_device(pdev);
6802 pci_set_drvdata(pdev, NULL);
6803 free_netdev(dev);
6804 return rc;
6805 }
6807 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6808 "IRQ %d, node addr %s\n",
6809 dev->name,
6810 bp->name,
6811 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6812 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6813 bnx2_bus_string(bp, str),
6814 dev->base_addr,
6815 bp->pdev->irq, print_mac(mac, dev->dev_addr));
6817 return 0;
6820 static void __devexit
6821 bnx2_remove_one(struct pci_dev *pdev)
6823 struct net_device *dev = pci_get_drvdata(pdev);
6824 struct bnx2 *bp = netdev_priv(dev);
6826 flush_scheduled_work();
6828 unregister_netdev(dev);
6830 if (bp->regview)
6831 iounmap(bp->regview);
6832 free_netdev(dev);
6834 pci_release_regions(pdev);
6835 pci_disable_device(pdev);
6836 pci_set_drvdata(pdev, NULL);
6840 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6842 struct net_device *dev = pci_get_drvdata(pdev);
6843 struct bnx2 *bp = netdev_priv(dev);
6844 u32 reset_code;
6846 /* PCI register 4 needs to be saved whether netif_running() or not.
6847 * MSI address and data need to be saved if using MSI and
6848 * netif_running().
6849 */
6850 pci_save_state(pdev);
6851 if (!netif_running(dev))
6852 return 0;
6854 flush_scheduled_work();
6855 bnx2_netif_stop(bp);
6856 netif_device_detach(dev);
6857 del_timer_sync(&bp->timer);
6858 if (bp->flags & NO_WOL_FLAG)
6859 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6860 else if (bp->wol)
6861 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6862 else
6863 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6864 bnx2_reset_chip(bp, reset_code);
6866 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6868 return 0;
6871 bnx2_resume(struct pci_dev *pdev)
6873 struct net_device *dev = pci_get_drvdata(pdev);
6874 struct bnx2 *bp = netdev_priv(dev);
6876 pci_restore_state(pdev);
6877 if (!netif_running(dev))
6878 return 0;
6880 bnx2_set_power_state(bp, PCI_D0);
6881 netif_device_attach(dev);
6882 bnx2_init_nic(bp);
6883 bnx2_netif_start(bp);
6884 return 0;
6887 static struct pci_driver bnx2_pci_driver = {
6888 .name = DRV_MODULE_NAME,
6889 .id_table = bnx2_pci_tbl,
6890 .probe = bnx2_init_one,
6891 .remove = __devexit_p(bnx2_remove_one),
6892 .suspend = bnx2_suspend,
6893 .resume = bnx2_resume,
6896 static int __init bnx2_init(void)
6898 return pci_register_driver(&bnx2_pci_driver);
6901 static void __exit bnx2_cleanup(void)
6903 pci_unregister_driver(&bnx2_pci_driver);
6906 module_init(bnx2_init);
6907 module_exit(bnx2_cleanup);