/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define FW_BUF_SIZE		0x8000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.9"
#define DRV_MODULE_RELDATE	"December 8, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
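
/* Note on the TX ring accounting done by bnx2_tx_avail() below: the
 * producer and consumer indices are free-running 16-bit counters, while
 * the ring uses 256 indices for 255 usable entries (the last BD of each
 * page is a chain pointer, so one index is skipped).  For example, with
 * tx_prod = 0x0005 and tx_cons = 0xfffb the masked difference is 10
 * descriptors in flight, regardless of the 16-bit wrap.
 */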
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
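
/* bnx2_ctx_wr() below writes one 32-bit word of on-chip context memory.
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * register pair and must poll for the WRITE_REQ bit to clear; earlier
 * chips take a direct address/data register write.
 */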
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
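
/* The two MDIO helpers that follow build the EMAC_MDIO_COMM command word
 * by hand: bits 21-25 carry the PHY address, bits 16-20 the register
 * number, and START_BUSY kicks off the transaction.  Both poll the
 * START_BUSY bit (up to 50 x 10 usec) and temporarily turn off hardware
 * auto-polling of the PHY while the manual access is in progress.
 */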
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
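
/* Interrupt quiescing: bp->intr_sem counts outstanding disables.
 * bnx2_netif_stop() bumps the count and masks the interrupt;
 * bnx2_netif_start() only re-enables the device when the count drops
 * back to zero (atomic_dec_and_test), so nested stop/start pairs
 * balance correctly.
 */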
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}
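
/* DMA memory layout managed by the two functions below: one coherent
 * block for the TX descriptor ring, one per RX ring page, and a single
 * combined allocation holding the status block followed (cache-line
 * aligned) by the statistics block.
 */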
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
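
/* Pause resolution in bnx2_resolve_flow_ctrl() below follows Table 28B-3
 * of the IEEE 802.3ab-1999 spec: with both PAUSE and ASYM_PAUSE
 * advertised locally, a partner advertising PAUSE yields symmetric flow
 * control and a partner advertising only ASYM_PAUSE yields RX-only;
 * with only ASYM_PAUSE advertised locally, TX-only flow control results
 * when the partner advertises both bits.  1000Base-X advertisement bits
 * are first mapped onto the copper PAUSE/ASYM bits so one table applies
 * to both media.
 */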
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
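
/* On the 5709 SerDes PHY the link status lives in the GP_STATUS block
 * rather than the IEEE BMSR register, so reads of bp->mii_bmsr1 must be
 * bracketed by the block-address switches done by the two helpers below.
 */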
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);
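
/* Remote PHY: when the firmware owns the PHY (REMOTE_PHY_CAP_FLAG), the
 * driver does not touch MDIO directly.  Instead it encodes the requested
 * speed/duplex/pause settings into a bitmask, drops it in the shared
 * memory mailbox (BNX2_DRV_MB_ARG0), and issues a CMD_SET_LINK firmware
 * command, as done by bnx2_setup_remote_phy() below.
 */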
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED100);
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
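
/* Driver/firmware handshake used by bnx2_fw_sync() below: each message
 * written to the BNX2_DRV_MB mailbox carries an incrementing sequence
 * number, and the firmware echoes that sequence back in BNX2_FW_MB to
 * acknowledge.  On timeout the driver reports FW_TIMEOUT back through
 * the same mailbox.
 */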
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
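
/* bnx2_alloc_bad_rbuf() below works around bad on-chip mbuf memory: it
 * drains the RX mbuf allocator, keeps every buffer whose address does
 * not have the "bad block" bit 9 set, and then frees only the good ones
 * back, leaving the bad blocks permanently allocated and out of
 * circulation.
 */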
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}

static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
	struct status_block *sblk = bp->status_blk;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
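
/* bnx2_reuse_rx_skb() below recycles an RX buffer in place when a packet
 * is copied out or a replacement skb cannot be allocated: the DMA
 * mapping and buffer descriptor address are moved from the consumer slot
 * to the producer slot so the hardware can refill the same buffer
 * without a new allocation.
 */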
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}

static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return 0;
}

static inline u16
bnx2_get_hw_rx_cons(struct bnx2 *bp)
{
	/* Skip indices whose low bits land on the chain BD slot. */
	u16 cons = bp->status_blk->status_rx_quick_consumer_index0;

	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
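
/* bnx2_rx_int() below is the NAPI receive loop.  It trusts the length
 * and status words the chip DMAs ahead of each frame (struct l2_fhdr),
 * copies small packets into fresh skbs when jumbo MTUs are in use, and
 * stops either when the budget is exhausted or when the software
 * consumer index catches up with the hardware one.
 */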
2414 bnx2_rx_int(struct bnx2 *bp, int budget)
2416 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2417 struct l2_fhdr *rx_hdr;
2420 hw_cons = bnx2_get_hw_rx_cons(bp);
2421 sw_cons = bp->rx_cons;
2422 sw_prod = bp->rx_prod;
2424 /* Memory barrier necessary as speculative reads of the rx
2425 * buffer can be ahead of the index in the status block
2428 while (sw_cons != hw_cons) {
2431 struct sw_bd *rx_buf;
2432 struct sk_buff *skb;
2433 dma_addr_t dma_addr;
2435 sw_ring_cons = RX_RING_IDX(sw_cons);
2436 sw_ring_prod = RX_RING_IDX(sw_prod);
2438 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2439 skb = rx_buf->skb;
2441 rx_buf->skb = NULL;
2443 dma_addr = pci_unmap_addr(rx_buf, mapping);
2445 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2446 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2448 rx_hdr = (struct l2_fhdr *) skb->data;
2449 len = rx_hdr->l2_fhdr_pkt_len - 4;
2451 if ((status = rx_hdr->l2_fhdr_status) &
2452 (L2_FHDR_ERRORS_BAD_CRC |
2453 L2_FHDR_ERRORS_PHY_DECODE |
2454 L2_FHDR_ERRORS_ALIGNMENT |
2455 L2_FHDR_ERRORS_TOO_SHORT |
2456 L2_FHDR_ERRORS_GIANT_FRAME)) {
2458 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2459 goto next_rx;
2460 }
2462 /* Since we don't have a jumbo ring, copy small packets
2463 * if mtu > 1500.
2464 */
2465 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2466 struct sk_buff *new_skb;
2468 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2469 if (new_skb == NULL) {
2470 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2471 sw_ring_prod);
2472 goto next_rx;
2473 }
2475 /* aligned copy */
2476 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2477 new_skb->data, len + 2);
2478 skb_reserve(new_skb, 2);
2479 skb_put(new_skb, len);
2481 bnx2_reuse_rx_skb(bp, skb,
2482 sw_ring_cons, sw_ring_prod);
2484 skb = new_skb;
2485 } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
2486 (sw_ring_cons << 16) | sw_ring_prod)))
2487 goto next_rx;
2489 skb->protocol = eth_type_trans(skb, bp->dev);
2491 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2492 (ntohs(skb->protocol) != 0x8100)) {
2494 dev_kfree_skb(skb);
2495 goto next_rx;
2497 }
2499 skb->ip_summed = CHECKSUM_NONE;
2500 if (bp->rx_csum &&
2501 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2502 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2504 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2505 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2506 skb->ip_summed = CHECKSUM_UNNECESSARY;
2510 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2511 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2512 rx_hdr->l2_fhdr_vlan_tag);
2513 } else
2516 netif_receive_skb(skb);
2518 bp->dev->last_rx = jiffies;
2519 rx_pkt++;
2521 next_rx:
2522 sw_cons = NEXT_RX_BD(sw_cons);
2523 sw_prod = NEXT_RX_BD(sw_prod);
2525 if (rx_pkt == budget)
2526 break;
2528 /* Refresh hw_cons to see if there is new work */
2529 if (sw_cons == hw_cons) {
2530 hw_cons = bnx2_get_hw_rx_cons(bp);
2531 rmb();
2532 }
2533 }
2534 bp->rx_cons = sw_cons;
2535 bp->rx_prod = sw_prod;
2537 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2539 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2541 mmiowb();
2543 return rx_pkt;
2544 }
2547 /* MSI ISR - The only difference between this and the INTx ISR
2548 * is that the MSI interrupt is always serviced.
2549 */
2550 static irqreturn_t
2551 bnx2_msi(int irq, void *dev_instance)
2552 {
2553 struct net_device *dev = dev_instance;
2554 struct bnx2 *bp = netdev_priv(dev);
2556 prefetch(bp->status_blk);
2557 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2558 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2559 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2561 /* Return here if interrupt is disabled. */
2562 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2563 return IRQ_HANDLED;
2565 netif_rx_schedule(dev, &bp->napi);
2567 return IRQ_HANDLED;
2568 }
2570 static irqreturn_t
2571 bnx2_msi_1shot(int irq, void *dev_instance)
2572 {
2573 struct net_device *dev = dev_instance;
2574 struct bnx2 *bp = netdev_priv(dev);
2576 prefetch(bp->status_blk);
2578 /* Return here if interrupt is disabled. */
2579 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2580 return IRQ_HANDLED;
2582 netif_rx_schedule(dev, &bp->napi);
2584 return IRQ_HANDLED;
2585 }
2587 static irqreturn_t
2588 bnx2_interrupt(int irq, void *dev_instance)
2589 {
2590 struct net_device *dev = dev_instance;
2591 struct bnx2 *bp = netdev_priv(dev);
2592 struct status_block *sblk = bp->status_blk;
2594 /* When using INTx, it is possible for the interrupt to arrive
2595 * at the CPU before the status block posted prior to the
2596 * interrupt. Reading a register will flush the status block.
2597 * When using MSI, the MSI message will always complete after
2598 * the status block write.
2599 */
2600 if ((sblk->status_idx == bp->last_status_idx) &&
2601 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2602 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2603 return IRQ_NONE;
2605 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2606 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2607 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2609 /* Read back to deassert IRQ immediately to avoid too many
2610 * spurious interrupts.
2611 */
2612 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2614 /* Return here if interrupt is shared and is disabled. */
2615 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2616 return IRQ_HANDLED;
2618 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2619 bp->last_status_idx = sblk->status_idx;
2620 __netif_rx_schedule(dev, &bp->napi);
2621 }
2623 return IRQ_HANDLED;
2624 }
2626 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2627 STATUS_ATTN_BITS_TIMER_ABORT)
2629 static inline int
2630 bnx2_has_work(struct bnx2 *bp)
2631 {
2632 struct status_block *sblk = bp->status_blk;
2634 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2635 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2636 return 1;
2638 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2639 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2640 return 1;
2642 return 0;
2643 }
2645 static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2647 struct status_block *sblk = bp->status_blk;
2648 u32 status_attn_bits = sblk->status_attn_bits;
2649 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2651 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2652 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2654 bnx2_phy_int(bp);
2656 /* This is needed to take care of transient status
2657 * during link changes.
2658 */
2659 REG_WR(bp, BNX2_HC_COMMAND,
2660 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2661 REG_RD(bp, BNX2_HC_COMMAND);
2662 }
2664 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2665 bnx2_tx_int(bp);
2667 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
2668 work_done += bnx2_rx_int(bp, budget - work_done);
2670 return work_done;
2671 }
2673 static int bnx2_poll(struct napi_struct *napi, int budget)
2674 {
2675 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2676 int work_done = 0;
2677 struct status_block *sblk = bp->status_blk;
2679 while (1) {
2680 work_done = bnx2_poll_work(bp, work_done, budget);
2682 if (unlikely(work_done >= budget))
2683 break;
2685 /* bp->last_status_idx is used below to tell the hw how
2686 * much work has been processed, so we must read it before
2687 * checking for more work.
2688 */
2689 bp->last_status_idx = sblk->status_idx;
2690 rmb();
2691 if (likely(!bnx2_has_work(bp))) {
2692 netif_rx_complete(bp->dev, napi);
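/* Re-arm interrupts below. A single index-valid write is enough for
 * MSI; for INTx the index is written first with the mask bit still
 * set and then written again to unmask, so a stale index cannot
 * retrigger the (possibly shared) line in between.
 */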
2693 if (likely(bp->flags & USING_MSI_FLAG)) {
2694 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2695 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2696 bp->last_status_idx);
2697 break;
2698 }
2699 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2700 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2701 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2702 bp->last_status_idx);
2704 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2705 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2706 bp->last_status_idx);
2707 break;
2708 }
2709 }
2711 return work_done;
2712 }
2714 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2715 * from set_multicast.
2716 */
2717 static void
2718 bnx2_set_rx_mode(struct net_device *dev)
2719 {
2720 struct bnx2 *bp = netdev_priv(dev);
2721 u32 rx_mode, sort_mode;
2722 int i;
2724 spin_lock_bh(&bp->phy_lock);
2726 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2727 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2728 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2729 #ifdef BCM_VLAN
2730 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2731 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2732 #else
2733 if (!(bp->flags & ASF_ENABLE_FLAG))
2734 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2735 #endif
2736 if (dev->flags & IFF_PROMISC) {
2737 /* Promiscuous mode. */
2738 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2739 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2740 BNX2_RPM_SORT_USER0_PROM_VLAN;
2741 }
2742 else if (dev->flags & IFF_ALLMULTI) {
2743 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2744 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2745 0xffffffff);
2746 }
2747 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2749 else {
2750 /* Accept one or more multicast(s). */
2751 struct dev_mc_list *mclist;
2752 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2753 u32 regidx;
2754 u32 bit;
2755 u32 crc;
2757 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
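/* Hash each multicast address with CRC32: bits 7-5 of the low CRC
 * byte select one of the 8 hash registers, bits 4-0 select the bit
 * within that register.
 */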
2759 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2760 i++, mclist = mclist->next) {
2762 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2763 bit = crc & 0xff;
2764 regidx = (bit & 0xe0) >> 5;
2765 bit &= 0x1f;
2766 mc_filter[regidx] |= (1 << bit);
2767 }
2769 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2770 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2771 mc_filter[i]);
2772 }
2774 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2777 if (rx_mode != bp->rx_mode) {
2778 bp->rx_mode = rx_mode;
2779 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2782 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2783 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2784 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2786 spin_unlock_bh(&bp->phy_lock);
2789 static void
2790 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2791 u32 rv2p_proc)
2792 {
2793 int i;
2794 u32 val;
2797 for (i = 0; i < rv2p_code_len; i += 8) {
2798 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2799 rv2p_code++;
2800 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2801 rv2p_code++;
2803 if (rv2p_proc == RV2P_PROC1) {
2804 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2805 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2806 }
2807 else {
2808 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2809 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2810 }
2811 }
2813 /* Reset the processor, un-stall is done later. */
2814 if (rv2p_proc == RV2P_PROC1) {
2815 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2816 }
2817 else {
2818 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2819 }
2820 }
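/* Generic loader for the on-chip RISC processors: halt the CPU, copy
 * the text/data/rodata images into its scratchpad, zero sbss and bss,
 * clear the prefetch instruction, point the PC at the entry address,
 * then clear the halt bit to start execution.
 */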
2822 static int
2823 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2824 {
2825 u32 offset;
2826 u32 val;
2827 int rc;
2829 /* Halt the CPU. */
2830 val = REG_RD_IND(bp, cpu_reg->mode);
2831 val |= cpu_reg->mode_value_halt;
2832 REG_WR_IND(bp, cpu_reg->mode, val);
2833 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2835 /* Load the Text area. */
2836 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2837 if (fw->gz_text) {
2838 int j;
2840 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2841 fw->gz_text_len);
2842 if (rc < 0)
2843 return rc;
2845 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2846 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2847 }
2848 }
2850 /* Load the Data area. */
2851 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2852 if (fw->data) {
2853 int j;
2855 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2856 REG_WR_IND(bp, offset, fw->data[j]);
2857 }
2858 }
2860 /* Load the SBSS area. */
2861 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2862 if (fw->sbss_len) {
2863 int j;
2865 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2866 REG_WR_IND(bp, offset, 0);
2867 }
2868 }
2870 /* Load the BSS area. */
2871 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2872 if (fw->bss_len) {
2873 int j;
2875 for (j = 0; j < (fw->bss_len / 4); j++, offset += 4) {
2876 REG_WR_IND(bp, offset, 0);
2877 }
2878 }
2880 /* Load the Read-Only area. */
2881 offset = cpu_reg->spad_base +
2882 (fw->rodata_addr - cpu_reg->mips_view_base);
2883 if (fw->rodata) {
2884 int j;
2886 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2887 REG_WR_IND(bp, offset, fw->rodata[j]);
2888 }
2889 }
2891 /* Clear the pre-fetch instruction. */
2892 REG_WR_IND(bp, cpu_reg->inst, 0);
2893 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2895 /* Start the CPU. */
2896 val = REG_RD_IND(bp, cpu_reg->mode);
2897 val &= ~cpu_reg->mode_value_halt;
2898 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2899 REG_WR_IND(bp, cpu_reg->mode, val);
2901 return 0;
2902 }
2904 static int
2905 bnx2_init_cpus(struct bnx2 *bp)
2906 {
2907 struct cpu_reg cpu_reg;
2908 struct fw_info *fw;
2909 int rc;
2910 void *text;
2912 /* Initialize the RV2P processor. */
2913 text = vmalloc(FW_BUF_SIZE);
2914 if (!text)
2915 return -ENOMEM;
2916 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2917 if (rc < 0)
2918 goto init_cpu_err;
2920 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2922 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2923 if (rc < 0)
2924 goto init_cpu_err;
2926 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
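/* Each internal processor (RXP, TXP, TPAT, COM, CP) exposes the same
 * control-register layout at a different base address, so the cpu_reg
 * block below is simply refilled with the per-processor addresses
 * before each load_cpu_fw() call.
 */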
2928 /* Initialize the RX Processor. */
2929 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2930 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2931 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2932 cpu_reg.state = BNX2_RXP_CPU_STATE;
2933 cpu_reg.state_value_clear = 0xffffff;
2934 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2935 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2936 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2937 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2938 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2939 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2940 cpu_reg.mips_view_base = 0x8000000;
2942 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2943 fw = &bnx2_rxp_fw_09;
2944 else
2945 fw = &bnx2_rxp_fw_06;
2948 rc = load_cpu_fw(bp, &cpu_reg, fw);
2949 if (rc)
2950 goto init_cpu_err;
2952 /* Initialize the TX Processor. */
2953 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2954 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2955 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2956 cpu_reg.state = BNX2_TXP_CPU_STATE;
2957 cpu_reg.state_value_clear = 0xffffff;
2958 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2959 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2960 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2961 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2962 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2963 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2964 cpu_reg.mips_view_base = 0x8000000;
2966 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2967 fw = &bnx2_txp_fw_09;
2968 else
2969 fw = &bnx2_txp_fw_06;
2972 rc = load_cpu_fw(bp, &cpu_reg, fw);
2973 if (rc)
2974 goto init_cpu_err;
2976 /* Initialize the TX Patch-up Processor. */
2977 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2978 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2979 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2980 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2981 cpu_reg.state_value_clear = 0xffffff;
2982 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2983 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2984 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2985 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2986 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2987 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2988 cpu_reg.mips_view_base = 0x8000000;
2990 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2991 fw = &bnx2_tpat_fw_09;
2992 else
2993 fw = &bnx2_tpat_fw_06;
2996 rc = load_cpu_fw(bp, &cpu_reg, fw);
2997 if (rc)
2998 goto init_cpu_err;
3000 /* Initialize the Completion Processor. */
3001 cpu_reg.mode = BNX2_COM_CPU_MODE;
3002 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3003 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3004 cpu_reg.state = BNX2_COM_CPU_STATE;
3005 cpu_reg.state_value_clear = 0xffffff;
3006 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3007 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3008 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3009 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3010 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3011 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3012 cpu_reg.mips_view_base = 0x8000000;
3014 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3015 fw = &bnx2_com_fw_09;
3016 else
3017 fw = &bnx2_com_fw_06;
3020 rc = load_cpu_fw(bp, &cpu_reg, fw);
3021 if (rc)
3022 goto init_cpu_err;
3024 /* Initialize the Command Processor. */
3025 cpu_reg.mode = BNX2_CP_CPU_MODE;
3026 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3027 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3028 cpu_reg.state = BNX2_CP_CPU_STATE;
3029 cpu_reg.state_value_clear = 0xffffff;
3030 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3031 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3032 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3033 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3034 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3035 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3036 cpu_reg.mips_view_base = 0x8000000;
3038 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3039 fw = &bnx2_cp_fw_09;
3042 rc = load_cpu_fw(bp, &cpu_reg, fw);
3043 if (rc)
3044 goto init_cpu_err;
3045 }
3047 init_cpu_err:
3048 vfree(text);
3049 return rc;
3050 }
3051 static int
3052 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3053 {
3054 u16 pmcsr;
3056 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3058 switch (state) {
3059 case PCI_D0: {
3060 u32 val;
3062 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3063 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3064 PCI_PM_CTRL_PME_STATUS);
3066 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3067 /* delay required during transition out of D3hot */
3068 msleep(20);
3070 val = REG_RD(bp, BNX2_EMAC_MODE);
3071 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3072 val &= ~BNX2_EMAC_MODE_MPKT;
3073 REG_WR(bp, BNX2_EMAC_MODE, val);
3075 val = REG_RD(bp, BNX2_RPM_CONFIG);
3076 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3077 REG_WR(bp, BNX2_RPM_CONFIG, val);
3078 break;
3079 }
3080 case PCI_D3hot: {
3081 int i;
3082 u32 val, wol_msg;
3084 if (bp->wol) {
3085 u32 advertising;
3086 u8 autoneg;
3088 autoneg = bp->autoneg;
3089 advertising = bp->advertising;
3091 if (bp->phy_port == PORT_TP) {
3092 bp->autoneg = AUTONEG_SPEED;
3093 bp->advertising = ADVERTISED_10baseT_Half |
3094 ADVERTISED_10baseT_Full |
3095 ADVERTISED_100baseT_Half |
3096 ADVERTISED_100baseT_Full |
3097 ADVERTISED_Autoneg;
3098 }
3100 spin_lock_bh(&bp->phy_lock);
3101 bnx2_setup_phy(bp, bp->phy_port);
3102 spin_unlock_bh(&bp->phy_lock);
3104 bp->autoneg = autoneg;
3105 bp->advertising = advertising;
3107 bnx2_set_mac_addr(bp);
3109 val = REG_RD(bp, BNX2_EMAC_MODE);
3111 /* Enable port mode. */
3112 val &= ~BNX2_EMAC_MODE_PORT;
3113 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3114 BNX2_EMAC_MODE_ACPI_RCVD |
3115 BNX2_EMAC_MODE_MPKT;
3116 if (bp->phy_port == PORT_TP)
3117 val |= BNX2_EMAC_MODE_PORT_MII;
3118 else {
3119 val |= BNX2_EMAC_MODE_PORT_GMII;
3120 if (bp->line_speed == SPEED_2500)
3121 val |= BNX2_EMAC_MODE_25G_MODE;
3122 }
3124 REG_WR(bp, BNX2_EMAC_MODE, val);
3126 /* receive all multicast */
3127 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3128 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3129 0xffffffff);
3130 }
3131 REG_WR(bp, BNX2_EMAC_RX_MODE,
3132 BNX2_EMAC_RX_MODE_SORT_MODE);
3134 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3135 BNX2_RPM_SORT_USER0_MC_EN;
3136 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3137 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3138 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3139 BNX2_RPM_SORT_USER0_ENA);
3141 /* Need to enable EMAC and RPM for WOL. */
3142 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3143 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3144 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3145 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3147 val = REG_RD(bp, BNX2_RPM_CONFIG);
3148 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3149 REG_WR(bp, BNX2_RPM_CONFIG, val);
3151 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3152 }
3153 else {
3154 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3155 }
3157 if (!(bp->flags & NO_WOL_FLAG))
3158 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3160 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3161 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3162 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3170 if (bp->wol)
3171 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3173 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3174 pmcsr);
3176 /* No more memory access after this point until
3177 * device is brought back to D0.
3178 */
3179 udelay(50);
3180 break;
3181 }
3182 default:
3183 return -EINVAL;
3184 }
3186 return 0;
3187 }
3188 static int
3189 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3190 {
3191 u32 val;
3192 int j;
3194 /* Request access to the flash interface. */
3195 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3196 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3197 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3198 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3199 break;
3201 udelay(5);
3202 }
3204 if (j >= NVRAM_TIMEOUT_COUNT)
3205 return -EBUSY;
3207 return 0;
3208 }
3210 static int
3211 bnx2_release_nvram_lock(struct bnx2 *bp)
3212 {
3213 u32 val;
3214 int j;
3216 /* Relinquish nvram interface. */
3217 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3219 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3220 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3221 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3222 break;
3224 udelay(5);
3225 }
3227 if (j >= NVRAM_TIMEOUT_COUNT)
3228 return -EBUSY;
3230 return 0;
3231 }
3234 static int
3235 bnx2_enable_nvram_write(struct bnx2 *bp)
3236 {
3237 u32 val;
3239 val = REG_RD(bp, BNX2_MISC_CFG);
3240 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3242 if (bp->flash_info->flags & BNX2_NV_WREN) {
3243 int j;
3245 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3246 REG_WR(bp, BNX2_NVM_COMMAND,
3247 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250 udelay(5);
3252 val = REG_RD(bp, BNX2_NVM_COMMAND);
3253 if (val & BNX2_NVM_COMMAND_DONE)
3254 break;
3255 }
3257 if (j >= NVRAM_TIMEOUT_COUNT)
3258 return -EBUSY;
3259 }
3260 return 0;
3261 }
3263 static void
3264 bnx2_disable_nvram_write(struct bnx2 *bp)
3265 {
3266 u32 val;
3268 val = REG_RD(bp, BNX2_MISC_CFG);
3269 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3273 static void
3274 bnx2_enable_nvram_access(struct bnx2 *bp)
3275 {
3276 u32 val;
3278 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3279 /* Enable both bits, even on read. */
3280 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3281 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3284 static void
3285 bnx2_disable_nvram_access(struct bnx2 *bp)
3286 {
3287 u32 val;
3289 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290 /* Disable both bits, even after read. */
3291 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3292 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3293 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3296 static int
3297 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3298 {
3299 u32 cmd;
3300 int j;
3302 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3303 /* Buffered flash, no erase needed */
3304 return 0;
3306 /* Build an erase command */
3307 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3308 BNX2_NVM_COMMAND_DOIT;
3310 /* Need to clear DONE bit separately. */
3311 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3313 /* Address of the NVRAM page to erase. */
3314 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3316 /* Issue an erase command. */
3317 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3319 /* Wait for completion. */
3320 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3322 udelay(5);
3325 val = REG_RD(bp, BNX2_NVM_COMMAND);
3326 if (val & BNX2_NVM_COMMAND_DONE)
3327 break;
3328 }
3330 if (j >= NVRAM_TIMEOUT_COUNT)
3331 return -EBUSY;
3333 return 0;
3334 }
3336 static int
3337 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3338 {
3339 u32 cmd;
3340 int j;
3342 /* Build the command word. */
3343 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3345 /* Calculate an offset of a buffered flash, not needed for 5709. */
3346 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3347 offset = ((offset / bp->flash_info->page_size) <<
3348 bp->flash_info->page_bits) +
3349 (offset % bp->flash_info->page_size);
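/* i.e. the linear byte offset is split into a page number (shifted
 * into the page_bits field) plus the byte offset within the page,
 * e.g. with 264-byte pages a dword at byte N lives at
 * ((N / 264) << page_bits) + (N % 264) in the flash's address space.
 */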
3352 /* Need to clear DONE bit separately. */
3353 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3355 /* Address of the NVRAM to read from. */
3356 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3358 /* Issue a read command. */
3359 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3361 /* Wait for completion. */
3362 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3364 udelay(5);
3367 val = REG_RD(bp, BNX2_NVM_COMMAND);
3368 if (val & BNX2_NVM_COMMAND_DONE) {
3369 val = REG_RD(bp, BNX2_NVM_READ);
3371 val = be32_to_cpu(val);
3372 memcpy(ret_val, &val, 4);
3373 break;
3374 }
3375 }
3376 if (j >= NVRAM_TIMEOUT_COUNT)
3377 return -EBUSY;
3379 return 0;
3380 }
3383 static int
3384 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3385 {
3386 u32 cmd;
3387 u32 val32;
3388 int j;
3389 /* Build the command word. */
3390 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3392 /* Calculate an offset of a buffered flash, not needed for 5709. */
3393 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3394 offset = ((offset / bp->flash_info->page_size) <<
3395 bp->flash_info->page_bits) +
3396 (offset % bp->flash_info->page_size);
3399 /* Need to clear DONE bit separately. */
3400 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3402 memcpy(&val32, val, 4);
3403 val32 = cpu_to_be32(val32);
3405 /* Write the data. */
3406 REG_WR(bp, BNX2_NVM_WRITE, val32);
3408 /* Address of the NVRAM to write to. */
3409 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3411 /* Issue the write command. */
3412 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3414 /* Wait for completion. */
3415 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3416 udelay(5);
3418 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3419 break;
3420 }
3421 if (j >= NVRAM_TIMEOUT_COUNT)
3422 return -EBUSY;
3424 return 0;
3425 }
3427 static int
3428 bnx2_init_nvram(struct bnx2 *bp)
3429 {
3430 u32 val;
3431 int j, entry_count, rc = 0;
3432 struct flash_spec *flash;
3434 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3435 bp->flash_info = &flash_5709;
3436 goto get_flash_size;
3439 /* Determine the selected interface. */
3440 val = REG_RD(bp, BNX2_NVM_CFG1);
3442 entry_count = ARRAY_SIZE(flash_table);
3444 if (val & 0x40000000) {
3446 /* Flash interface has been reconfigured */
3447 for (j = 0, flash = &flash_table[0]; j < entry_count;
3448 j++, flash++) {
3449 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3450 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3451 bp->flash_info = flash;
3452 break;
3453 }
3454 }
3455 }
3456 else {
3457 u32 mask;
3458 /* Not yet been reconfigured */
3460 if (val & (1 << 23))
3461 mask = FLASH_BACKUP_STRAP_MASK;
3463 mask = FLASH_STRAP_MASK;
3465 for (j = 0, flash = &flash_table[0]; j < entry_count;
3466 j++, flash++) {
3468 if ((val & mask) == (flash->strapping & mask)) {
3469 bp->flash_info = flash;
3471 /* Request access to the flash interface. */
3472 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3473 return rc;
3475 /* Enable access to flash interface */
3476 bnx2_enable_nvram_access(bp);
3478 /* Reconfigure the flash interface */
3479 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3480 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3481 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3482 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3484 /* Disable access to flash interface */
3485 bnx2_disable_nvram_access(bp);
3486 bnx2_release_nvram_lock(bp);
3488 break;
3489 }
3490 }
3491 } /* if (val & 0x40000000) */
3493 if (j == entry_count) {
3494 bp->flash_info = NULL;
3495 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3496 return -ENODEV;
3497 }
3499 get_flash_size:
3500 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3501 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3502 if (val)
3503 bp->flash_size = val;
3504 else
3505 bp->flash_size = bp->flash_info->total_size;
3507 return rc;
3508 }
3510 static int
3511 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3512 int buf_size)
3513 {
3514 int rc = 0;
3515 u32 cmd_flags, offset32, len32, extra;
3517 if (buf_size == 0)
3518 return 0;
3520 /* Request access to the flash interface. */
3521 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3522 return rc;
3524 /* Enable access to flash interface */
3525 bnx2_enable_nvram_access(bp);
3527 len32 = buf_size;
3528 offset32 = offset;
3529 extra = 0;
3531 cmd_flags = 0;
3533 if (offset32 & 3) {
3534 u8 buf[4];
3535 u32 pre_len;
3537 offset32 &= ~3;
3538 pre_len = 4 - (offset & 3);
3540 if (pre_len >= len32) {
3541 pre_len = len32;
3542 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3543 BNX2_NVM_COMMAND_LAST;
3544 }
3545 else {
3546 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3547 }
3549 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3551 if (rc)
3552 return rc;
3554 memcpy(ret_buf, buf + (offset & 3), pre_len);
3556 offset32 += 4;
3557 ret_buf += pre_len;
3558 len32 -= pre_len;
3559 }
3560 if (len32 & 3) {
3561 extra = 4 - (len32 & 3);
3562 len32 = (len32 + 4) & ~3;
3563 }
3565 if (len32 == 4) {
3566 u8 buf[4];
3568 if (cmd_flags)
3569 cmd_flags = BNX2_NVM_COMMAND_LAST;
3570 else
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3572 BNX2_NVM_COMMAND_LAST;
3574 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3576 memcpy(ret_buf, buf, 4 - extra);
3577 }
3578 else if (len32 > 0) {
3579 u8 buf[4];
3581 /* Read the first word. */
3582 if (cmd_flags)
3583 cmd_flags = 0;
3584 else
3585 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3587 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3589 /* Advance to the next dword. */
3590 offset32 += 4;
3591 ret_buf += 4;
3592 len32 -= 4;
3594 while (len32 > 4 && rc == 0) {
3595 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3597 /* Advance to the next dword. */
3598 offset32 += 4;
3599 ret_buf += 4;
3600 len32 -= 4;
3601 }
3603 if (rc)
3604 return rc;
3606 cmd_flags = BNX2_NVM_COMMAND_LAST;
3607 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3609 memcpy(ret_buf, buf, 4 - extra);
3610 }
3612 /* Disable access to flash interface */
3613 bnx2_disable_nvram_access(bp);
3615 bnx2_release_nvram_lock(bp);
3617 return rc;
3618 }
3620 static int
3621 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3622 int buf_size)
3623 {
3624 u32 written, offset32, len32;
3625 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3626 int rc = 0;
3627 int align_start, align_end;
3629 buf = data_buf;
3630 offset32 = offset;
3631 len32 = buf_size;
3632 align_start = align_end = 0;
3634 if ((align_start = (offset32 & 3))) {
3635 offset32 &= ~3;
3636 len32 += align_start;
3637 if (len32 < 4)
3638 len32 = 4;
3639 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3640 return rc;
3641 }
3643 if (len32 & 3) {
3644 align_end = 4 - (len32 & 3);
3645 len32 += align_end;
3646 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3647 return rc;
3648 }
3650 if (align_start || align_end) {
3651 align_buf = kmalloc(len32, GFP_KERNEL);
3652 if (align_buf == NULL)
3653 return -ENOMEM;
3654 if (align_start) {
3655 memcpy(align_buf, start, 4);
3656 }
3657 if (align_end) {
3658 memcpy(align_buf + len32 - 4, end, 4);
3659 }
3660 memcpy(align_buf + align_start, data_buf, buf_size);
3661 buf = align_buf;
3662 }
3664 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3665 flash_buffer = kmalloc(264, GFP_KERNEL);
3666 if (flash_buffer == NULL) {
3667 rc = -ENOMEM;
3668 goto nvram_write_end;
3669 }
3670 }
3672 written = 0;
3673 while ((written < len32) && (rc == 0)) {
3674 u32 page_start, page_end, data_start, data_end;
3675 u32 addr, cmd_flags;
3676 int i;
3678 /* Find the page_start addr */
3679 page_start = offset32 + written;
3680 page_start -= (page_start % bp->flash_info->page_size);
3681 /* Find the page_end addr */
3682 page_end = page_start + bp->flash_info->page_size;
3683 /* Find the data_start addr */
3684 data_start = (written == 0) ? offset32 : page_start;
3685 /* Find the data_end addr */
3686 data_end = (page_end > offset32 + len32) ?
3687 (offset32 + len32) : page_end;
3689 /* Request access to the flash interface. */
3690 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3691 goto nvram_write_end;
3693 /* Enable access to flash interface */
3694 bnx2_enable_nvram_access(bp);
3696 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3697 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3698 int j;
3700 /* Read the whole page into the buffer
3701 * (non-buffer flash only) */
3702 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3703 if (j == (bp->flash_info->page_size - 4)) {
3704 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3706 rc = bnx2_nvram_read_dword(bp,
3707 page_start + j,
3708 &flash_buffer[j],
3709 cmd_flags);
3711 if (rc)
3712 goto nvram_write_end;
3714 cmd_flags = 0;
3715 }
3716 }
3718 /* Enable writes to flash interface (unlock write-protect) */
3719 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3720 goto nvram_write_end;
3722 /* Loop to write back the buffer data from page_start to
3723 * data_start */
3724 i = 0;
3725 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3726 /* Erase the page */
3727 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3728 goto nvram_write_end;
3730 /* Re-enable the write again for the actual write */
3731 bnx2_enable_nvram_write(bp);
3733 for (addr = page_start; addr < data_start;
3734 addr += 4, i += 4) {
3736 rc = bnx2_nvram_write_dword(bp, addr,
3737 &flash_buffer[i], cmd_flags);
3739 if (rc)
3740 goto nvram_write_end;
3742 cmd_flags = 0;
3743 }
3744 }
3746 /* Loop to write the new data from data_start to data_end */
3747 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3748 if ((addr == page_end - 4) ||
3749 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3750 (addr == data_end - 4))) {
3752 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3753 }
3754 rc = bnx2_nvram_write_dword(bp, addr, buf,
3755 cmd_flags);
3757 if (rc != 0)
3758 goto nvram_write_end;
3760 cmd_flags = 0;
3761 buf += 4;
3762 }
3764 /* Loop to write back the buffer data from data_end
3765 * to page_end */
3766 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3767 for (addr = data_end; addr < page_end;
3768 addr += 4, i += 4) {
3770 if (addr == page_end-4) {
3771 cmd_flags = BNX2_NVM_COMMAND_LAST;
3772 }
3773 rc = bnx2_nvram_write_dword(bp, addr,
3774 &flash_buffer[i], cmd_flags);
3776 if (rc)
3777 goto nvram_write_end;
3779 cmd_flags = 0;
3780 }
3781 }
3783 /* Disable writes to flash interface (lock write-protect) */
3784 bnx2_disable_nvram_write(bp);
3786 /* Disable access to flash interface */
3787 bnx2_disable_nvram_access(bp);
3788 bnx2_release_nvram_lock(bp);
3790 /* Increment written */
3791 written += data_end - data_start;
3792 }
3794 nvram_write_end:
3795 kfree(flash_buffer);
3796 kfree(align_buf);
3798 return rc;
3799 }
3800 static void
3801 bnx2_init_remote_phy(struct bnx2 *bp)
3802 {
3803 u32 val;
3805 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3806 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3807 return;
3809 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3810 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3811 return;
3813 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3814 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3816 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3817 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3818 bp->phy_port = PORT_FIBRE;
3820 bp->phy_port = PORT_TP;
3822 if (netif_running(bp->dev)) {
3823 u32 sig;
3825 if (val & BNX2_LINK_STATUS_LINK_UP) {
3826 bp->link_up = 1;
3827 netif_carrier_on(bp->dev);
3828 } else {
3829 bp->link_up = 0;
3830 netif_carrier_off(bp->dev);
3831 }
3832 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3833 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3834 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3835 sig);
3836 }
3837 }
3838 }
3840 static int
3841 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3842 {
3843 u32 val;
3844 int i, rc = 0;
3847 /* Wait for the current PCI transaction to complete before
3848 * issuing a reset. */
3849 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3850 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3851 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3852 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3853 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3854 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3857 /* Wait for the firmware to tell us it is ok to issue a reset. */
3858 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3860 /* Deposit a driver reset signature so the firmware knows that
3861 * this is a soft reset. */
3862 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3863 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3865 /* Do a dummy read to force the chip to complete all current transactions
3866 * before we issue a reset. */
3867 val = REG_RD(bp, BNX2_MISC_ID);
3869 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3870 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3871 REG_RD(bp, BNX2_MISC_COMMAND);
3874 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3875 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3877 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3879 } else {
3880 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3881 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3882 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3885 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3887 /* Reading back any register after chip reset will hang the
3888 * bus on 5706 A0 and A1. The msleep below provides plenty
3889 * of margin for write posting.
3890 */
3891 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3892 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3893 msleep(20);
3895 /* Reset takes approximately 30 usec */
3896 for (i = 0; i < 10; i++) {
3897 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3898 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3899 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3900 break;
3902 udelay(10);
3903 }
3904 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3905 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3906 printk(KERN_ERR PFX "Chip reset did not complete\n");
3911 /* Make sure byte swapping is properly configured. */
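/* BNX2_PCI_SWAP_DIAG0 reads back a fixed 0x01020304 test pattern when
 * byte swapping is programmed correctly, so any other value means the
 * host and the chip disagree on endianness.
 */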
3912 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3913 if (val != 0x01020304) {
3914 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3918 /* Wait for the firmware to finish its initialization. */
3919 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3920 if (rc)
3921 return rc;
3923 spin_lock_bh(&bp->phy_lock);
3924 old_port = bp->phy_port;
3925 bnx2_init_remote_phy(bp);
3926 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
3927 bnx2_set_default_remote_link(bp);
3928 spin_unlock_bh(&bp->phy_lock);
3930 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3931 /* Adjust the voltage regulator to two steps lower. The default
3932 * of this register is 0x0000000e. */
3933 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3935 /* Remove bad rbuf memory from the free pool. */
3936 rc = bnx2_alloc_bad_rbuf(bp);
3937 }
3939 return rc;
3940 }
3942 static int
3943 bnx2_init_chip(struct bnx2 *bp)
3944 {
3945 u32 val;
3946 int rc;
3948 /* Make sure the interrupt is not active. */
3949 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3951 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3952 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3953 #ifdef __BIG_ENDIAN
3954 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3955 #endif
3956 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3957 DMA_READ_CHANS << 12 |
3958 DMA_WRITE_CHANS << 16;
3960 val |= (0x2 << 20) | (1 << 11);
3962 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3963 val |= (1 << 23);
3965 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3966 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3967 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3969 REG_WR(bp, BNX2_DMA_CONFIG, val);
3971 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3972 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3973 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3974 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3977 if (bp->flags & PCIX_FLAG) {
3978 u16 val16;
3980 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3981 &val16);
3982 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3983 val16 & ~PCI_X_CMD_ERO);
3984 }
3986 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3987 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3988 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3989 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3991 /* Initialize context mapping and zero out the quick contexts. The
3992 * context block must have already been enabled. */
3993 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3994 rc = bnx2_init_5709_context(bp);
3995 if (rc)
3996 return rc;
3997 } else
3998 bnx2_init_context(bp);
4000 if ((rc = bnx2_init_cpus(bp)) != 0)
4001 return rc;
4003 bnx2_init_nvram(bp);
4005 bnx2_set_mac_addr(bp);
4007 val = REG_RD(bp, BNX2_MQ_CONFIG);
4008 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4009 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4010 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4011 val |= BNX2_MQ_CONFIG_HALT_DIS;
4013 REG_WR(bp, BNX2_MQ_CONFIG, val);
4015 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4016 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4017 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4019 val = (BCM_PAGE_BITS - 8) << 24;
4020 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4022 /* Configure page size. */
4023 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4024 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4025 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4026 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4028 val = bp->mac_addr[0] +
4029 (bp->mac_addr[1] << 8) +
4030 (bp->mac_addr[2] << 16) +
4031 bp->mac_addr[3] +
4032 (bp->mac_addr[4] << 8) +
4033 (bp->mac_addr[5] << 16);
4034 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
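/* The EMAC uses this seed to randomize half-duplex collision backoff;
 * folding in all six MAC address bytes makes the seed, and thus the
 * backoff pattern, differ per NIC.
 */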
4036 /* Program the MTU. Also include 4 bytes for CRC32. */
4037 val = bp->dev->mtu + ETH_HLEN + 4;
4038 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4039 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4040 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4042 bp->last_status_idx = 0;
4043 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4045 /* Set up how to generate a link change interrupt. */
4046 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4048 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4049 (u64) bp->status_blk_mapping & 0xffffffff);
4050 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4052 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4053 (u64) bp->stats_blk_mapping & 0xffffffff);
4054 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4055 (u64) bp->stats_blk_mapping >> 32);
4057 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4058 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4060 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4061 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4063 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4064 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4066 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4068 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4070 REG_WR(bp, BNX2_HC_COM_TICKS,
4071 (bp->com_ticks_int << 16) | bp->com_ticks);
4073 REG_WR(bp, BNX2_HC_CMD_TICKS,
4074 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4076 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4077 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4078 else
4079 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4080 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4082 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4083 val = BNX2_HC_CONFIG_COLLECT_STATS;
4084 else {
4085 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4086 BNX2_HC_CONFIG_COLLECT_STATS;
4087 }
4089 if (bp->flags & ONE_SHOT_MSI_FLAG)
4090 val |= BNX2_HC_CONFIG_ONE_SHOT;
4092 REG_WR(bp, BNX2_HC_CONFIG, val);
4094 /* Clear internal stats counters. */
4095 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4097 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4099 /* Initialize the receive filter. */
4100 bnx2_set_rx_mode(bp->dev);
4102 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4103 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4104 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4105 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4106 }
4107 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4108 0);
4110 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4111 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4115 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4117 return rc;
4118 }
4120 static void
4121 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4122 {
4123 u32 val, offset0, offset1, offset2, offset3;
4125 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4126 offset0 = BNX2_L2CTX_TYPE_XI;
4127 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4128 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4129 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4130 } else {
4131 offset0 = BNX2_L2CTX_TYPE;
4132 offset1 = BNX2_L2CTX_CMD_TYPE;
4133 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4134 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4135 }
4136 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4137 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4139 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4140 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4142 val = (u64) bp->tx_desc_mapping >> 32;
4143 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4145 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4146 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4147 }
4149 static void
4150 bnx2_init_tx_ring(struct bnx2 *bp)
4151 {
4152 struct tx_bd *txbd;
4153 u32 cid;
4155 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4157 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4159 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4160 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4162 bp->tx_prod = 0;
4163 bp->tx_cons = 0;
4164 bp->hw_tx_cons = 0;
4165 bp->tx_prod_bseq = 0;
4167 cid = TX_CID;
4168 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4169 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4171 bnx2_init_tx_context(bp, cid);
4172 }
4174 static void
4175 bnx2_init_rx_ring(struct bnx2 *bp)
4176 {
4177 struct rx_bd *rxbd;
4178 int i;
4179 u16 prod, ring_prod;
4180 u32 val;
4182 /* 8 for CRC and VLAN */
4183 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4185 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4187 ring_prod = prod = bp->rx_prod = 0;
4188 bp->rx_cons = 0;
4189 bp->rx_prod_bseq = 0;
4191 for (i = 0; i < bp->rx_max_ring; i++) {
4192 int j;
4194 rxbd = &bp->rx_desc_ring[i][0];
4195 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4196 rxbd->rx_bd_len = bp->rx_buf_use_size;
4197 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4198 }
4199 if (i == (bp->rx_max_ring - 1))
4200 j = 0;
4201 else
4202 j = i + 1;
4203 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4204 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4205 0xffffffff;
4206 }
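/* The descriptor left in rxbd after the inner loop is the chain BD of
 * the page: it holds the DMA address of the next page, with the last
 * page linking back to page 0 so the pages form a ring.
 */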
4208 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4209 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4211 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4213 val = (u64) bp->rx_desc_mapping[0] >> 32;
4214 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4216 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4217 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4219 for (i = 0; i < bp->rx_ring_size; i++) {
4220 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4221 break;
4222 }
4223 prod = NEXT_RX_BD(prod);
4224 ring_prod = RX_RING_IDX(prod);
4225 }
4226 bp->rx_prod = prod;
4228 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4230 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4231 }
4233 static void
4234 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4235 {
4236 u32 max, num_rings = 1;
4238 bp->rx_ring_size = size;
4240 while (size > MAX_RX_DESC_CNT) {
4241 size -= MAX_RX_DESC_CNT;
4242 num_rings++;
4243 }
4244 /* round to next power of 2 */
4245 max = MAX_RX_RINGS;
4246 while ((max & num_rings) == 0)
4247 max >>= 1;
4249 if (num_rings != max)
4250 max <<= 1;
4252 bp->rx_max_ring = max;
4253 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4254 }
4256 static void
4257 bnx2_free_tx_skbs(struct bnx2 *bp)
4258 {
4259 int i;
4261 if (bp->tx_buf_ring == NULL)
4262 return;
4264 for (i = 0; i < TX_DESC_CNT; ) {
4265 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4266 struct sk_buff *skb = tx_buf->skb;
4267 int j, last;
4269 if (skb == NULL) {
4270 i++;
4271 continue;
4272 }
4274 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4275 skb_headlen(skb), PCI_DMA_TODEVICE);
4277 tx_buf->skb = NULL;
4279 last = skb_shinfo(skb)->nr_frags;
4280 for (j = 0; j < last; j++) {
4281 tx_buf = &bp->tx_buf_ring[i + j + 1];
4282 pci_unmap_page(bp->pdev,
4283 pci_unmap_addr(tx_buf, mapping),
4284 skb_shinfo(skb)->frags[j].size,
4285 PCI_DMA_TODEVICE);
4286 }
4287 dev_kfree_skb(skb);
4288 i += j + 1;
4289 }
4290 }
4293 static void
4294 bnx2_free_rx_skbs(struct bnx2 *bp)
4295 {
4296 int i;
4298 if (bp->rx_buf_ring == NULL)
4299 return;
4301 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4302 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4303 struct sk_buff *skb = rx_buf->skb;
4305 if (skb == NULL)
4306 continue;
4308 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4309 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4311 rx_buf->skb = NULL;
4313 dev_kfree_skb(skb);
4314 }
4315 }
4317 static void
4318 bnx2_free_skbs(struct bnx2 *bp)
4319 {
4320 bnx2_free_tx_skbs(bp);
4321 bnx2_free_rx_skbs(bp);
4322 }
4324 static int
4325 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4326 {
4327 int rc;
4329 rc = bnx2_reset_chip(bp, reset_code);
4330 if (rc)
4331 return rc;
4334 if ((rc = bnx2_init_chip(bp)) != 0)
4335 return rc;
4337 bnx2_init_tx_ring(bp);
4338 bnx2_init_rx_ring(bp);
4339 return 0;
4340 }
4342 static int
4343 bnx2_init_nic(struct bnx2 *bp)
4344 {
4345 int rc;
4347 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4348 return rc;
4350 spin_lock_bh(&bp->phy_lock);
4351 bnx2_init_phy(bp);
4352 bnx2_set_link(bp);
4353 spin_unlock_bh(&bp->phy_lock);
4354 return 0;
4355 }
4357 static int
4358 bnx2_test_registers(struct bnx2 *bp)
4359 {
4360 int ret;
4361 int i, is_5709;
4362 static const struct {
4363 u16 offset;
4364 u16 flags;
4365 #define BNX2_FL_NOT_5709 1
4366 u32 rw_mask;
4367 u32 ro_mask;
4368 } reg_tbl[] = {
4369 { 0x006c, 0, 0x00000000, 0x0000003f },
4370 { 0x0090, 0, 0xffffffff, 0x00000000 },
4371 { 0x0094, 0, 0x00000000, 0x00000000 },
4373 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4374 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4375 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4376 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4377 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4378 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4379 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4380 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4381 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4383 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4384 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4385 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4386 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4387 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4388 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4390 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4391 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4392 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4394 { 0x1000, 0, 0x00000000, 0x00000001 },
4395 { 0x1004, 0, 0x00000000, 0x000f0001 },
4397 { 0x1408, 0, 0x01c00800, 0x00000000 },
4398 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4399 { 0x14a8, 0, 0x00000000, 0x000001ff },
4400 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4401 { 0x14b0, 0, 0x00000002, 0x00000001 },
4402 { 0x14b8, 0, 0x00000000, 0x00000000 },
4403 { 0x14c0, 0, 0x00000000, 0x00000009 },
4404 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4405 { 0x14cc, 0, 0x00000000, 0x00000001 },
4406 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4408 { 0x1800, 0, 0x00000000, 0x00000001 },
4409 { 0x1804, 0, 0x00000000, 0x00000003 },
4411 { 0x2800, 0, 0x00000000, 0x00000001 },
4412 { 0x2804, 0, 0x00000000, 0x00003f01 },
4413 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4414 { 0x2810, 0, 0xffff0000, 0x00000000 },
4415 { 0x2814, 0, 0xffff0000, 0x00000000 },
4416 { 0x2818, 0, 0xffff0000, 0x00000000 },
4417 { 0x281c, 0, 0xffff0000, 0x00000000 },
4418 { 0x2834, 0, 0xffffffff, 0x00000000 },
4419 { 0x2840, 0, 0x00000000, 0xffffffff },
4420 { 0x2844, 0, 0x00000000, 0xffffffff },
4421 { 0x2848, 0, 0xffffffff, 0x00000000 },
4422 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4424 { 0x2c00, 0, 0x00000000, 0x00000011 },
4425 { 0x2c04, 0, 0x00000000, 0x00030007 },
4427 { 0x3c00, 0, 0x00000000, 0x00000001 },
4428 { 0x3c04, 0, 0x00000000, 0x00070000 },
4429 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4430 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4431 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4432 { 0x3c14, 0, 0x00000000, 0xffffffff },
4433 { 0x3c18, 0, 0x00000000, 0xffffffff },
4434 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4435 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4437 { 0x5004, 0, 0x00000000, 0x0000007f },
4438 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4440 { 0x5c00, 0, 0x00000000, 0x00000001 },
4441 { 0x5c04, 0, 0x00000000, 0x0003000f },
4442 { 0x5c08, 0, 0x00000003, 0x00000000 },
4443 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4444 { 0x5c10, 0, 0x00000000, 0xffffffff },
4445 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4446 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4447 { 0x5c88, 0, 0x00000000, 0x00077373 },
4448 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4450 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4451 { 0x680c, 0, 0xffffffff, 0x00000000 },
4452 { 0x6810, 0, 0xffffffff, 0x00000000 },
4453 { 0x6814, 0, 0xffffffff, 0x00000000 },
4454 { 0x6818, 0, 0xffffffff, 0x00000000 },
4455 { 0x681c, 0, 0xffffffff, 0x00000000 },
4456 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4457 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4458 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4459 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4460 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4461 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4462 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4463 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4464 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4465 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4466 { 0x684c, 0, 0xffffffff, 0x00000000 },
4467 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4468 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4469 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4470 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4471 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4472 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4474 { 0xffff, 0, 0x00000000, 0x00000000 },
4475 };
4477 ret = 0;
4478 is_5709 = 0;
4479 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4480 is_5709 = 1;
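/* For every table entry: writing 0 and then 0xffffffff must toggle
 * exactly the read-write bits (rw_mask) while the read-only bits
 * (ro_mask) keep their original value; any deviation fails the test.
 */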
4482 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4483 u32 offset, rw_mask, ro_mask, save_val, val;
4484 u16 flags = reg_tbl[i].flags;
4486 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4489 offset = (u32) reg_tbl[i].offset;
4490 rw_mask = reg_tbl[i].rw_mask;
4491 ro_mask = reg_tbl[i].ro_mask;
4493 save_val = readl(bp->regview + offset);
4495 writel(0, bp->regview + offset);
4497 val = readl(bp->regview + offset);
4498 if ((val & rw_mask) != 0) {
4499 goto reg_test_err;
4500 }
4502 if ((val & ro_mask) != (save_val & ro_mask)) {
4503 goto reg_test_err;
4504 }
4506 writel(0xffffffff, bp->regview + offset);
4508 val = readl(bp->regview + offset);
4509 if ((val & rw_mask) != rw_mask) {
4510 goto reg_test_err;
4511 }
4513 if ((val & ro_mask) != (save_val & ro_mask)) {
4514 goto reg_test_err;
4515 }
4517 writel(save_val, bp->regview + offset);
4518 continue;
4520 reg_test_err:
4521 writel(save_val, bp->regview + offset);
4522 ret = -ENODEV;
4523 break;
4524 }
4525 return ret;
4526 }
4528 static int
4529 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4530 {
4531 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4532 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4533 int i;
4535 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4536 u32 offset;
4538 for (offset = 0; offset < size; offset += 4) {
4540 REG_WR_IND(bp, start + offset, test_pattern[i]);
4542 if (REG_RD_IND(bp, start + offset) !=
4543 test_pattern[i]) {
4544 return -ENODEV;
4545 }
4546 }
4547 }
4548 return 0;
4549 }
4551 static int
4552 bnx2_test_memory(struct bnx2 *bp)
4553 {
4554 int ret = 0;
4555 int i;
4556 static struct mem_entry {
4557 u32 offset;
4558 u32 len;
4559 } mem_tbl_5706[] = {
4560 { 0x60000, 0x4000 },
4561 { 0xa0000, 0x3000 },
4562 { 0xe0000, 0x4000 },
4563 { 0x120000, 0x4000 },
4564 { 0x1a0000, 0x4000 },
4565 { 0x160000, 0x4000 },
4566 { 0xffffffff, 0 },
4567 },
4568 mem_tbl_5709[] = {
4569 { 0x60000, 0x4000 },
4570 { 0xa0000, 0x3000 },
4571 { 0xe0000, 0x4000 },
4572 { 0x120000, 0x4000 },
4573 { 0x1a0000, 0x4000 },
4574 { 0xffffffff, 0 },
4575 };
4576 struct mem_entry *mem_tbl;
4578 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4579 mem_tbl = mem_tbl_5709;
4581 mem_tbl = mem_tbl_5706;
4583 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4584 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4585 mem_tbl[i].len)) != 0) {
4586 break;
4587 }
4588 }
4590 return ret;
4591 }
4593 #define BNX2_MAC_LOOPBACK 0
4594 #define BNX2_PHY_LOOPBACK 1
4596 static int
4597 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4598 {
4599 unsigned int pkt_size, num_pkts, i;
4600 struct sk_buff *skb, *rx_skb;
4601 unsigned char *packet;
4602 u16 rx_start_idx, rx_idx;
4603 dma_addr_t map;
4604 struct tx_bd *txbd;
4605 struct sw_bd *rx_buf;
4606 struct l2_fhdr *rx_hdr;
4607 int ret = -ENODEV;
4609 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4610 bp->loopback = MAC_LOOPBACK;
4611 bnx2_set_mac_loopback(bp);
4613 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4614 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4615 return 0;
4617 bp->loopback = PHY_LOOPBACK;
4618 bnx2_set_phy_loopback(bp);
4619 }
4620 else
4621 return -EINVAL;
4623 pkt_size = 1514;
4624 skb = netdev_alloc_skb(bp->dev, pkt_size);
4625 if (!skb)
4626 return -ENOMEM;
4627 packet = skb_put(skb, pkt_size);
4628 memcpy(packet, bp->dev->dev_addr, 6);
4629 memset(packet + 6, 0x0, 8);
4630 for (i = 14; i < pkt_size; i++)
4631 packet[i] = (unsigned char) (i & 0xff);
4633 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4634 PCI_DMA_TODEVICE);
4636 REG_WR(bp, BNX2_HC_COMMAND,
4637 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4639 REG_RD(bp, BNX2_HC_COMMAND);
4642 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4644 num_pkts = 0;
4646 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4648 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4649 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4650 txbd->tx_bd_mss_nbytes = pkt_size;
4651 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4653 num_pkts++;
4654 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4655 bp->tx_prod_bseq += pkt_size;
4657 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4658 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4662 REG_WR(bp, BNX2_HC_COMMAND,
4663 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4665 REG_RD(bp, BNX2_HC_COMMAND);
4669 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4670 dev_kfree_skb(skb);
4672 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4673 goto loopback_test_done;
4676 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4677 if (rx_idx != rx_start_idx + num_pkts) {
4678 goto loopback_test_done;
4681 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4682 rx_skb = rx_buf->skb;
4684 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4685 skb_reserve(rx_skb, bp->rx_offset);
4687 pci_dma_sync_single_for_cpu(bp->pdev,
4688 pci_unmap_addr(rx_buf, mapping),
4689 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4691 if (rx_hdr->l2_fhdr_status &
4692 (L2_FHDR_ERRORS_BAD_CRC |
4693 L2_FHDR_ERRORS_PHY_DECODE |
4694 L2_FHDR_ERRORS_ALIGNMENT |
4695 L2_FHDR_ERRORS_TOO_SHORT |
4696 L2_FHDR_ERRORS_GIANT_FRAME)) {
4698 goto loopback_test_done;
4701 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4702 goto loopback_test_done;
4705 for (i = 14; i < pkt_size; i++) {
4706 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4707 goto loopback_test_done;
4708 }
4709 }
4711 ret = 0;
4713 loopback_test_done:
4714 bp->loopback = 0;
4715 return ret;
4716 }
4718 #define BNX2_MAC_LOOPBACK_FAILED 1
4719 #define BNX2_PHY_LOOPBACK_FAILED 2
4720 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4721 BNX2_PHY_LOOPBACK_FAILED)
4723 static int
4724 bnx2_test_loopback(struct bnx2 *bp)
4725 {
4726 int rc = 0;
4728 if (!netif_running(bp->dev))
4729 return BNX2_LOOPBACK_FAILED;
4731 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4732 spin_lock_bh(&bp->phy_lock);
4733 bnx2_init_phy(bp);
4734 spin_unlock_bh(&bp->phy_lock);
4735 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4736 rc |= BNX2_MAC_LOOPBACK_FAILED;
4737 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4738 rc |= BNX2_PHY_LOOPBACK_FAILED;
4739 return rc;
4740 }
4742 #define NVRAM_SIZE 0x200
4743 #define CRC32_RESIDUAL 0xdebb20e3
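/* A CRC32 computed over a block that ends with its own little-endian
 * CRC yields the constant residual 0xdebb20e3, so a whole-block CRC
 * can be validated without locating the stored checksum first.
 */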
4745 static int
4746 bnx2_test_nvram(struct bnx2 *bp)
4747 {
4748 u32 buf[NVRAM_SIZE / 4];
4749 u8 *data = (u8 *) buf;
4750 int rc = 0;
4751 u32 magic, csum;
4753 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4754 goto test_nvram_done;
4756 magic = be32_to_cpu(buf[0]);
4757 if (magic != 0x669955aa) {
4758 rc = -ENODEV;
4759 goto test_nvram_done;
4760 }
4762 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4763 goto test_nvram_done;
4765 csum = ether_crc_le(0x100, data);
4766 if (csum != CRC32_RESIDUAL) {
4767 rc = -ENODEV;
4768 goto test_nvram_done;
4769 }
4771 csum = ether_crc_le(0x100, data + 0x100);
4772 if (csum != CRC32_RESIDUAL) {
4773 rc = -ENODEV;
4774 }
4776 test_nvram_done:
4777 return rc;
4778 }
4780 static int
4781 bnx2_test_link(struct bnx2 *bp)
4782 {
4783 u32 bmsr;
4785 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4786 if (bp->link_up)
4787 return 0;
4788 return -ENODEV;
4789 }
4790 spin_lock_bh(&bp->phy_lock);
4791 bnx2_enable_bmsr1(bp);
4792 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4793 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4794 bnx2_disable_bmsr1(bp);
4795 spin_unlock_bh(&bp->phy_lock);
4797 if (bmsr & BMSR_LSTATUS) {
4798 return 0;
4799 }
4800 return -ENODEV;
4801 }
4803 static int
4804 bnx2_test_intr(struct bnx2 *bp)
4805 {
4806 int i;
4807 u16 status_idx;
4809 if (!netif_running(bp->dev))
4810 return -ENODEV;
4812 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4814 /* This register is not touched during run-time. */
4815 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4816 REG_RD(bp, BNX2_HC_COMMAND);
4818 for (i = 0; i < 10; i++) {
4819 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4820 status_idx) {
4822 break;
4823 }
4825 msleep_interruptible(10);
4826 }
4827 if (i < 10)
4828 return 0;
4830 return -ENODEV;
4831 }
4833 static void
4834 bnx2_5706_serdes_timer(struct bnx2 *bp)
4835 {
4836 spin_lock(&bp->phy_lock);
4837 if (bp->serdes_an_pending)
4838 bp->serdes_an_pending--;
4839 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4840 u32 bmcr;
4842 bp->current_interval = bp->timer_interval;
4844 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4846 if (bmcr & BMCR_ANENABLE) {
4847 u32 phy1, phy2;
4849 bnx2_write_phy(bp, 0x1c, 0x7c00);
4850 bnx2_read_phy(bp, 0x1c, &phy1);
4852 bnx2_write_phy(bp, 0x17, 0x0f01);
4853 bnx2_read_phy(bp, 0x15, &phy2);
4854 bnx2_write_phy(bp, 0x17, 0x0f01);
4855 bnx2_read_phy(bp, 0x15, &phy2);
4857 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4858 !(phy2 & 0x20)) { /* no CONFIG */
4860 bmcr &= ~BMCR_ANENABLE;
4861 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4862 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4863 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4864 }
4865 }
4866 }
4867 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4868 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4869 u32 phy2;
4871 bnx2_write_phy(bp, 0x17, 0x0f01);
4872 bnx2_read_phy(bp, 0x15, &phy2);
4873 if (phy2 & 0x20) {
4874 u32 bmcr;
4876 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4877 bmcr |= BMCR_ANENABLE;
4878 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4880 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4881 }
4882 } else
4883 bp->current_interval = bp->timer_interval;
4885 spin_unlock(&bp->phy_lock);
4886 }
4888 static void
4889 bnx2_5708_serdes_timer(struct bnx2 *bp)
4890 {
4891 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4892 return;
4894 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4895 bp->serdes_an_pending = 0;
4896 return;
4897 }
4899 spin_lock(&bp->phy_lock);
4900 if (bp->serdes_an_pending)
4901 bp->serdes_an_pending--;
4902 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4903 u32 bmcr;
4905 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4906 if (bmcr & BMCR_ANENABLE) {
4907 bnx2_enable_forced_2g5(bp);
4908 bp->current_interval = SERDES_FORCED_TIMEOUT;
4909 } else {
4910 bnx2_disable_forced_2g5(bp);
4911 bp->serdes_an_pending = 2;
4912 bp->current_interval = bp->timer_interval;
4913 }
4914 } else
4916 bp->current_interval = bp->timer_interval;
4918 spin_unlock(&bp->phy_lock);
4919 }
4921 static void
4922 bnx2_timer(unsigned long data)
4923 {
4924 struct bnx2 *bp = (struct bnx2 *) data;
4926 if (!netif_running(bp->dev))
4927 return;
4929 if (atomic_read(&bp->intr_sem) != 0)
4930 goto bnx2_restart_timer;
4932 bnx2_send_heart_beat(bp);
4934 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4936 /* workaround occasional corrupted counters */
4937 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4938 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4939 BNX2_HC_COMMAND_STATS_NOW);
4941 if (bp->phy_flags & PHY_SERDES_FLAG) {
4942 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4943 bnx2_5706_serdes_timer(bp);
4945 bnx2_5708_serdes_timer(bp);
4946 }
4948 bnx2_restart_timer:
4949 mod_timer(&bp->timer, jiffies + bp->current_interval);
4950 }
4952 static int
4953 bnx2_request_irq(struct bnx2 *bp)
4954 {
4955 struct net_device *dev = bp->dev;
4956 int rc;
4958 if (bp->flags & USING_MSI_FLAG) {
4959 irq_handler_t fn = bnx2_msi;
4961 if (bp->flags & ONE_SHOT_MSI_FLAG)
4962 fn = bnx2_msi_1shot;
4964 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4966 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4967 IRQF_SHARED, dev->name, dev);
4968 return rc;
4969 }
4971 static void
4972 bnx2_free_irq(struct bnx2 *bp)
4973 {
4974 struct net_device *dev = bp->dev;
4976 if (bp->flags & USING_MSI_FLAG) {
4977 free_irq(bp->pdev->irq, dev);
4978 pci_disable_msi(bp->pdev);
4979 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4980 } else
4981 free_irq(bp->pdev->irq, dev);
4982 }
4984 /* Called with rtnl_lock */
4985 static int
4986 bnx2_open(struct net_device *dev)
4987 {
4988 struct bnx2 *bp = netdev_priv(dev);
4989 int rc;
4991 netif_carrier_off(dev);
4993 bnx2_set_power_state(bp, PCI_D0);
4994 bnx2_disable_int(bp);
4996 rc = bnx2_alloc_mem(bp);
4997 if (rc)
4998 return rc;
5000 napi_enable(&bp->napi);
5002 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5003 if (pci_enable_msi(bp->pdev) == 0) {
5004 bp->flags |= USING_MSI_FLAG;
5005 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5006 bp->flags |= ONE_SHOT_MSI_FLAG;
5009 rc = bnx2_request_irq(bp);
5012 napi_disable(&bp->napi);
5017 rc = bnx2_init_nic(bp);
5020 napi_disable(&bp->napi);
5027 mod_timer(&bp->timer, jiffies + bp->current_interval);
5029 atomic_set(&bp->intr_sem, 0);
5031 bnx2_enable_int(bp);
5033 if (bp->flags & USING_MSI_FLAG) {
5034 /* Test MSI to make sure it is working
5035 * If MSI test fails, go back to INTx mode
5037 if (bnx2_test_intr(bp) != 0) {
5038 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5039 " using MSI, switching to INTx mode. Please"
5040 " report this failure to the PCI maintainer"
5041 " and include system chipset information.\n",
5044 bnx2_disable_int(bp);
5047 rc = bnx2_init_nic(bp);
5050 rc = bnx2_request_irq(bp);
5053 napi_disable(&bp->napi);
5056 del_timer_sync(&bp->timer);
5059 bnx2_enable_int(bp);
5062 if (bp->flags & USING_MSI_FLAG) {
5063 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5066 netif_start_queue(dev);
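
/* The MSI sanity check above guards against platforms that silently drop
 * MSI writes: bnx2_test_intr() asks the chip to raise an interrupt and
 * watches for its delivery.  On failure the recovery sequence is, in
 * order: disable interrupts, free the MSI vector, re-initialize the NIC,
 * then re-request the IRQ (now as shared INTx), so the device is never
 * left with a half-configured interrupt path.
 */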
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
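
/* Descriptor accounting in bnx2_start_xmit(): an skb consumes
 * nr_frags + 1 buffer descriptors, one for the linear header and one per
 * page fragment.  For example, an skb with a linear area and three
 * fragments fills four BDs; the first carries TX_BD_FLAGS_START and the
 * last is stamped with TX_BD_FLAGS_END before the producer index and
 * byte sequence are written to the chip's mailbox registers.
 */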
5253 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
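
/* Example expansion: on a 64-bit kernel,
 *
 *	GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 *
 * token-pastes into
 *
 *	(unsigned long) ((unsigned long) (stats_blk->stat_IfHCInOctets_hi) << 32) +
 *	(unsigned long) (stats_blk->stat_IfHCInOctets_lo)
 *
 * i.e. the two 32-bit halves of the hardware counter are recombined.
 * A 32-bit kernel keeps only the low word, since unsigned long cannot
 * hold the full 64-bit value there.
 */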
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
5309 net_stats->rx_packets =
5310 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5311 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5312 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5314 net_stats->tx_packets =
5315 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5316 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5317 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5319 net_stats->rx_bytes =
5320 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5322 net_stats->tx_bytes =
5323 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5325 net_stats->multicast =
5326 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5328 net_stats->collisions =
5329 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5331 net_stats->rx_length_errors =
5332 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5333 stats_blk->stat_EtherStatsOverrsizePkts);
5335 net_stats->rx_over_errors =
5336 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5338 net_stats->rx_frame_errors =
5339 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5341 net_stats->rx_crc_errors =
5342 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5344 net_stats->rx_errors = net_stats->rx_length_errors +
5345 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5346 net_stats->rx_crc_errors;
5348 net_stats->tx_aborted_errors =
5349 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5350 stats_blk->stat_Dot3StatsLateCollisions);
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5375 /* All ethtool functions called with rtnl_lock */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	} else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
5530 #define BNX2_REGDUMP_LEN (32 * 1024)
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset / 4;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);
5736 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5737 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5739 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5740 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5742 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5743 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5745 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5746 if (bp->rx_quick_cons_trip_int > 0xff)
5747 bp->rx_quick_cons_trip_int = 0xff;
5749 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5750 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5752 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5753 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5755 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5756 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5758 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
5767 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5768 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5769 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
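
/* These fields map one-to-one onto the standard ethtool coalescing
 * interface, so a typical tuning session would look something like this
 * (illustrative command line, not taken from this source):
 *
 *	ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 *
 * Values are clamped above to the hardware limits (0x3ff tick values,
 * 0xff frame counts), and a running interface is restarted so that the
 * new host-coalescing parameters get programmed into the chip.
 */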
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
5785 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5786 ering->rx_mini_max_pending = 0;
5787 ering->rx_jumbo_max_pending = 0;
5789 ering->rx_pending = bp->rx_ring_size;
5790 ering->rx_mini_pending = 0;
5791 ering->rx_jumbo_pending = 0;
5793 ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;
	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
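
/* Ring resizing is destructive: the chip is reset and all ring memory is
 * freed and reallocated, so in-flight packets are dropped.  An
 * illustrative invocation (not from this source) would be
 * "ethtool -G eth0 rx 255 tx 255"; tx must stay above MAX_SKB_FRAGS so
 * that a maximally fragmented skb can always be posted.
 */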
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);
5836 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5837 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);
5846 bp->req_flow_ctrl = 0;
5847 if (epause->rx_pause)
5848 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5849 if (epause->tx_pause)
5850 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5852 if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}

#define BNX2_NUM_STATS 46
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
5909 { "rx_ucast_packets" },
5910 { "rx_mcast_packets" },
5911 { "rx_bcast_packets" },
5912 { "tx_ucast_packets" },
5913 { "tx_mcast_packets" },
5914 { "tx_bcast_packets" },
5915 { "tx_mac_errors" },
5916 { "tx_carrier_errors" },
5917 { "rx_crc_errors" },
5918 { "rx_align_errors" },
5919 { "tx_single_collisions" },
5920 { "tx_multi_collisions" },
5922 { "tx_excess_collisions" },
5923 { "tx_late_collisions" },
5924 { "tx_total_collisions" },
5927 { "rx_undersize_packets" },
5928 { "rx_oversize_packets" },
5929 { "rx_64_byte_packets" },
5930 { "rx_65_to_127_byte_packets" },
5931 { "rx_128_to_255_byte_packets" },
5932 { "rx_256_to_511_byte_packets" },
5933 { "rx_512_to_1023_byte_packets" },
5934 { "rx_1024_to_1522_byte_packets" },
5935 { "rx_1523_to_9022_byte_packets" },
5936 { "tx_64_byte_packets" },
5937 { "tx_65_to_127_byte_packets" },
5938 { "tx_128_to_255_byte_packets" },
5939 { "tx_256_to_511_byte_packets" },
5940 { "tx_512_to_1023_byte_packets" },
5941 { "tx_1024_to_1522_byte_packets" },
5942 { "tx_1523_to_9022_byte_packets" },
5943 { "rx_xon_frames" },
5944 { "rx_xoff_frames" },
5945 { "tx_xon_frames" },
5946 { "tx_xoff_frames" },
5947 { "rx_mac_ctrl_frames" },
5948 { "rx_filtered_packets" },
5950 { "rx_fw_discards" },
5953 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5955 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5956 STATS_OFFSET32(stat_IfHCInOctets_hi),
5957 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5958 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5959 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5960 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5961 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5962 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5963 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5964 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5965 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5966 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5967 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5968 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5969 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5970 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5971 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5972 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5973 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5974 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5975 STATS_OFFSET32(stat_EtherStatsCollisions),
5976 STATS_OFFSET32(stat_EtherStatsFragments),
5977 STATS_OFFSET32(stat_EtherStatsJabbers),
5978 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5979 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5980 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5981 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5982 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5983 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5984 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5985 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5986 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5987 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5988 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5989 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5990 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5991 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5992 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5993 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5994 STATS_OFFSET32(stat_XonPauseFramesReceived),
5995 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5996 STATS_OFFSET32(stat_OutXonSent),
5997 STATS_OFFSET32(stat_OutXoffSent),
5998 STATS_OFFSET32(stat_MacControlFramesReceived),
5999 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6000 STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
6004 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6008 8,0,8,8,8,8,8,8,8,8,
6009 4,0,4,4,4,4,4,4,4,4,
6010 4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6015 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6016 8,0,8,8,8,8,8,8,8,8,
6017 4,4,4,4,4,4,4,4,4,4,
6018 4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
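
/* Each entry gives the width in bytes of the corresponding counter in
 * bnx2_stats_offset_arr: 8 for 64-bit hi/lo counter pairs, 4 for plain
 * 32-bit counters, and 0 for counters that must be skipped (reported as
 * zero) because of the chip errata noted above.
 */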
6023 #define BNX2_NUM_TESTS 6
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6028 { "register_test (offline)" },
6029 { "memory_test (offline)" },
6030 { "loopback_test (offline)" },
6031 { "nvram_test (online)" },
6032 { "interrupt_test (online)" },
6033 { "link_test (online)" },
6037 bnx2_get_sset_count(struct net_device *dev, int sset)
6041 return BNX2_NUM_TESTS;
6043 return BNX2_NUM_STATS;
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
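
/* Identify-LED helper backing "ethtool -p".  The argument below is the
 * blink time in seconds; each second yields one LED-off/LED-on pair of
 * 500 ms steps, with all speed and traffic LED overrides forced so the
 * port is visually unambiguous.  The original BNX2_MISC_CFG LED mode is
 * restored on exit.
 */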
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
6206 static const struct ethtool_ops bnx2_ethtool_ops = {
6207 .get_settings = bnx2_get_settings,
6208 .set_settings = bnx2_set_settings,
6209 .get_drvinfo = bnx2_get_drvinfo,
6210 .get_regs_len = bnx2_get_regs_len,
6211 .get_regs = bnx2_get_regs,
6212 .get_wol = bnx2_get_wol,
6213 .set_wol = bnx2_set_wol,
6214 .nway_reset = bnx2_nway_reset,
6215 .get_link = ethtool_op_get_link,
6216 .get_eeprom_len = bnx2_get_eeprom_len,
6217 .get_eeprom = bnx2_get_eeprom,
6218 .set_eeprom = bnx2_set_eeprom,
6219 .get_coalesce = bnx2_get_coalesce,
6220 .set_coalesce = bnx2_set_coalesce,
6221 .get_ringparam = bnx2_get_ringparam,
6222 .set_ringparam = bnx2_set_ringparam,
6223 .get_pauseparam = bnx2_get_pauseparam,
6224 .set_pauseparam = bnx2_set_pauseparam,
6225 .get_rx_csum = bnx2_get_rx_csum,
6226 .set_rx_csum = bnx2_set_rx_csum,
6227 .set_tx_csum = bnx2_set_tx_csum,
6228 .set_sg = ethtool_op_set_sg,
6229 .set_tso = bnx2_set_tso,
6230 .self_test = bnx2_self_test,
6231 .get_strings = bnx2_get_strings,
6232 .phys_id = bnx2_phys_id,
6233 .get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6237 /* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6291 /* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
6308 /* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}
6466 pci_set_master(pdev);
	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
6480 spin_lock_init(&bp->indirect_lock);
6481 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6483 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6484 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6485 dev->mem_end = dev->mem_start + mem_len;
6486 dev->irq = pdev->irq;
	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6504 bnx2_set_power_state(bp, PCI_D0);
6506 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}
	/* 5708 cannot support DMA addresses > 40-bit.  */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}
6551 if (!(bp->flags & PCIE_FLAG))
6552 bnx2_get_pci_speed(bp);
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);
6570 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6572 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6573 BNX2_SHM_HDR_SIGNATURE_SIG) {
6574 u32 off = PCI_FUNC(pdev->devfn) << 2;
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6611 bp->flags |= ASF_ENABLE_FLAG;
		for (i = 0; i < 30; i++) {
			reg = REG_RD_IND(bp, bp->shmem_base +
					     BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6622 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6623 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6624 reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = REG_RD_IND(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
6637 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6638 bp->mac_addr[0] = (u8) (reg >> 8);
6639 bp->mac_addr[1] = (u8) reg;
6641 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6642 bp->mac_addr[2] = (u8) (reg >> 24);
6643 bp->mac_addr[3] = (u8) (reg >> 16);
6644 bp->mac_addr[4] = (u8) (reg >> 8);
6645 bp->mac_addr[5] = (u8) reg;
6647 bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6654 bp->tx_quick_cons_trip_int = 20;
6655 bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;
6664 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6666 bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
6672 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6673 bnx2_get_5709_media(bp);
6674 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6675 bp->phy_flags |= PHY_SERDES_FLAG;
6677 bp->phy_port = PORT_TP;
6678 if (bp->phy_flags & PHY_SERDES_FLAG) {
6679 bp->phy_port = PORT_FIBRE;
6680 reg = REG_RD_IND(bp, bp->shmem_base +
6681 BNX2_SHARED_HW_CFG_CONFIG);
6682 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= NO_WOL_FLAG;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);
6693 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6694 CHIP_NUM(bp) == CHIP_NUM_5708)
6695 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6696 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6697 (CHIP_REV(bp) == CHIP_REV_Ax ||
6698 CHIP_REV(bp) == CHIP_REV_Bx))
6699 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6701 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6702 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6703 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= NO_WOL_FLAG;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6709 bp->tx_quick_cons_trip_int =
6710 bp->tx_quick_cons_trip;
6711 bp->tx_ticks_int = bp->tx_ticks;
6712 bp->rx_quick_cons_trip_int =
6713 bp->rx_quick_cons_trip;
6714 bp->rx_ticks_int = bp->rx_ticks;
6715 bp->comp_prod_trip_int = bp->comp_prod_trip;
6716 bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
6722 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6723 * with byte enables disabled on the unused 32-bit word. This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6731 struct pci_dev *amd_8132 = NULL;
		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
6746 bnx2_set_default_link(bp);
6747 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6749 init_timer(&bp->timer);
6750 bp->timer.expires = RUN_AT(bp->timer_interval);
6751 bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
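
/* Produces strings such as "PCI Express" or, for a parallel bus,
 * "PCI-X 64-bit 133MHz" / "PCI 32-bit 33MHz", which bnx2_init_one()
 * below folds into its probe banner.
 */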
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);
6803 if (version_printed++ == 0)
6804 printk(KERN_INFO "%s", version);
6806 /* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
6819 dev->hard_start_xmit = bnx2_start_xmit;
6820 dev->stop = bnx2_close;
6821 dev->get_stats = bnx2_get_stats;
6822 dev->set_multicast_list = bnx2_set_rx_mode;
6823 dev->do_ioctl = bnx2_ioctl;
6824 dev->set_mac_address = bnx2_change_mac_addr;
6825 dev->change_mtu = bnx2_change_mtu;
6826 dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
6831 dev->ethtool_ops = &bnx2_ethtool_ops;
6833 bp = netdev_priv(dev);
6834 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);
6842 memcpy(dev->dev_addr, bp->mac_addr, 6);
6843 memcpy(dev->perm_addr, bp->mac_addr, 6);
6844 bp->name = board_info[ent->driver_data].name;
6846 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6847 if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6854 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6855 dev->features |= NETIF_F_TSO6;
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6948 static struct pci_driver bnx2_pci_driver = {
6949 .name = DRV_MODULE_NAME,
6950 .id_table = bnx2_pci_tbl,
6951 .probe = bnx2_init_one,
6952 .remove = __devexit_p(bnx2_remove_one),
6953 .suspend = bnx2_suspend,
	.resume		= bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6967 module_init(bnx2_init);
6968 module_exit(bnx2_cleanup);
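
/* Typical module usage (illustrative, not from this source):
 *
 *	modprobe bnx2 disable_msi=1
 *
 * loads the driver with MSI globally disabled, forcing INTx even on
 * adapters that advertise MSI capability; see the disable_msi module
 * parameter and the per-board AMD 8132 quirk in bnx2_init_board().
 */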