1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.5"
60 #define DRV_MODULE_RELDATE "April 29, 2008"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
235 /* The ring uses 256 indices for 255 entries; one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bnapi->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
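/*
 * Worked example (illustrative; in this driver TX_DESC_CNT is 256 and
 * MAX_TX_DESC_CNT is 255): with bp->tx_prod == 300 and
 * bnapi->tx_cons == 44, diff == 256, i.e. one full ring (255 entries
 * plus the skipped index) is outstanding, so diff is clamped to 255
 * before the free count is computed.
 */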
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
269 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
271 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
277 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
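/*
 * Usage note: the shmem helpers above are thin wrappers over the
 * indirect register window.  For example, bnx2_shmem_rd(bp, BNX2_FW_MB)
 * programs BNX2_PCICFG_REG_WINDOW_ADDRESS with
 * bp->shmem_base + BNX2_FW_MB and then reads BNX2_PCICFG_REG_WINDOW,
 * all under indirect_lock (see bnx2_fw_sync() below for a caller).
 */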
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
284 spin_lock_bh(&bp->indirect_lock);
285 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
288 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291 for (i = 0; i < 5; i++) {
293 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
299 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300 REG_WR(bp, BNX2_CTX_DATA, val);
302 spin_unlock_bh(&bp->indirect_lock);
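/*
 * Two write paths above: on the 5709, context memory sits behind a host
 * page table and is written through the CTX_CTX_DATA/CTX_CTX_CTRL pair,
 * polling for the WRITE_REQ bit to self-clear; older chips map the
 * context directly and take the CTX_DATA_ADR/CTX_DATA path with no
 * completion polling.
 */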
306 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
311 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
312 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
315 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321 val1 = (bp->phy_addr << 21) | (reg << 16) |
322 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
323 BNX2_EMAC_MDIO_COMM_START_BUSY;
324 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
326 for (i = 0; i < 50; i++) {
329 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
330 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
333 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
340 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
349 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
350 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
353 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
354 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
363 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
368 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
369 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
372 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
373 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
378 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
379 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
380 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
381 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
383 for (i = 0; i < 50; i++) {
386 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
387 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
393 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
398 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
399 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
402 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
403 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
412 bnx2_disable_int(struct bnx2 *bp)
415 struct bnx2_napi *bnapi;
417 for (i = 0; i < bp->irq_nvecs; i++) {
418 bnapi = &bp->bnx2_napi[i];
419 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
422 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
426 bnx2_enable_int(struct bnx2 *bp)
429 struct bnx2_napi *bnapi;
431 for (i = 0; i < bp->irq_nvecs; i++) {
432 bnapi = &bp->bnx2_napi[i];
434 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437 bnapi->last_status_idx);
439 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441 bnapi->last_status_idx);
443 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
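/*
 * Note the two-step ack above: each vector is first written with
 * MASK_INT still set, then unmasked by a second write carrying the same
 * last_status_idx.  The final COAL_NOW command asks the host coalescing
 * block to fire immediately if events are already pending.
 */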
447 bnx2_disable_int_sync(struct bnx2 *bp)
451 atomic_inc(&bp->intr_sem);
452 bnx2_disable_int(bp);
453 for (i = 0; i < bp->irq_nvecs; i++)
454 synchronize_irq(bp->irq_tbl[i].vector);
458 bnx2_napi_disable(struct bnx2 *bp)
462 for (i = 0; i < bp->irq_nvecs; i++)
463 napi_disable(&bp->bnx2_napi[i].napi);
467 bnx2_napi_enable(struct bnx2 *bp)
471 for (i = 0; i < bp->irq_nvecs; i++)
472 napi_enable(&bp->bnx2_napi[i].napi);
476 bnx2_netif_stop(struct bnx2 *bp)
478 bnx2_disable_int_sync(bp);
479 if (netif_running(bp->dev)) {
480 bnx2_napi_disable(bp);
481 netif_tx_disable(bp->dev);
482 bp->dev->trans_start = jiffies; /* prevent tx timeout */
487 bnx2_netif_start(struct bnx2 *bp)
489 if (atomic_dec_and_test(&bp->intr_sem)) {
490 if (netif_running(bp->dev)) {
491 netif_wake_queue(bp->dev);
492 bnx2_napi_enable(bp);
499 bnx2_free_mem(struct bnx2 *bp)
503 for (i = 0; i < bp->ctx_pages; i++) {
504 if (bp->ctx_blk[i]) {
505 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
507 bp->ctx_blk_mapping[i]);
508 bp->ctx_blk[i] = NULL;
511 if (bp->status_blk) {
512 pci_free_consistent(bp->pdev, bp->status_stats_size,
513 bp->status_blk, bp->status_blk_mapping);
514 bp->status_blk = NULL;
515 bp->stats_blk = NULL;
517 if (bp->tx_desc_ring) {
518 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
519 bp->tx_desc_ring, bp->tx_desc_mapping);
520 bp->tx_desc_ring = NULL;
522 kfree(bp->tx_buf_ring);
523 bp->tx_buf_ring = NULL;
524 for (i = 0; i < bp->rx_max_ring; i++) {
525 if (bp->rx_desc_ring[i])
526 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
528 bp->rx_desc_mapping[i]);
529 bp->rx_desc_ring[i] = NULL;
531 vfree(bp->rx_buf_ring);
532 bp->rx_buf_ring = NULL;
533 for (i = 0; i < bp->rx_max_pg_ring; i++) {
534 if (bp->rx_pg_desc_ring[i])
535 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
536 bp->rx_pg_desc_ring[i],
537 bp->rx_pg_desc_mapping[i]);
538 bp->rx_pg_desc_ring[i] = NULL;
541 vfree(bp->rx_pg_ring);
542 bp->rx_pg_ring = NULL;
546 bnx2_alloc_mem(struct bnx2 *bp)
548 int i, status_blk_size;
550 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
551 if (bp->tx_buf_ring == NULL)
554 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
555 &bp->tx_desc_mapping);
556 if (bp->tx_desc_ring == NULL)
559 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
560 if (bp->rx_buf_ring == NULL)
563 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
565 for (i = 0; i < bp->rx_max_ring; i++) {
566 bp->rx_desc_ring[i] =
567 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
568 &bp->rx_desc_mapping[i]);
569 if (bp->rx_desc_ring[i] == NULL)
574 if (bp->rx_pg_ring_size) {
575 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
577 if (bp->rx_pg_ring == NULL)
580 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
584 for (i = 0; i < bp->rx_max_pg_ring; i++) {
585 bp->rx_pg_desc_ring[i] =
586 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
587 &bp->rx_pg_desc_mapping[i]);
588 if (bp->rx_pg_desc_ring[i] == NULL)
593 /* Combine status and statistics blocks into one allocation. */
594 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
595 if (bp->flags & BNX2_FLAG_MSIX_CAP)
596 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
597 BNX2_SBLK_MSIX_ALIGN_SIZE);
598 bp->status_stats_size = status_blk_size +
599 sizeof(struct statistics_block);
601 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
602 &bp->status_blk_mapping);
603 if (bp->status_blk == NULL)
606 memset(bp->status_blk, 0, bp->status_stats_size);
608 bp->bnx2_napi[0].status_blk = bp->status_blk;
609 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
610 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
611 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
613 bnapi->status_blk_msix = (void *)
614 ((unsigned long) bp->status_blk +
615 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
616 bnapi->int_num = i << 24;
620 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
status_blk_size);
623 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
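/*
 * Layout of the combined allocation (illustrative):
 *
 *   status_blk_mapping -> +---------------------+
 *                         | status block(s)     |  status_blk_size
 *   stats_blk_mapping  -> +---------------------+
 *                         | statistics block    |
 *                         +---------------------+
 */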
625 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
626 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
627 if (bp->ctx_pages == 0)
629 for (i = 0; i < bp->ctx_pages; i++) {
630 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
632 &bp->ctx_blk_mapping[i]);
633 if (bp->ctx_blk[i] == NULL)
645 bnx2_report_fw_link(struct bnx2 *bp)
647 u32 fw_link_status = 0;
649 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
655 switch (bp->line_speed) {
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_10HALF;
660 fw_link_status = BNX2_LINK_STATUS_10FULL;
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_100HALF;
666 fw_link_status = BNX2_LINK_STATUS_100FULL;
669 if (bp->duplex == DUPLEX_HALF)
670 fw_link_status = BNX2_LINK_STATUS_1000HALF;
672 fw_link_status = BNX2_LINK_STATUS_1000FULL;
675 if (bp->duplex == DUPLEX_HALF)
676 fw_link_status = BNX2_LINK_STATUS_2500HALF;
678 fw_link_status = BNX2_LINK_STATUS_2500FULL;
682 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
685 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
690 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
691 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
692 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
694 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
698 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
700 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
704 bnx2_xceiver_str(struct bnx2 *bp)
706 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
707 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
"Copper"));
712 bnx2_report_link(struct bnx2 *bp)
715 netif_carrier_on(bp->dev);
716 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
717 bnx2_xceiver_str(bp));
719 printk("%d Mbps ", bp->line_speed);
721 if (bp->duplex == DUPLEX_FULL)
722 printk("full duplex");
724 printk("half duplex");
727 if (bp->flow_ctrl & FLOW_CTRL_RX) {
728 printk(", receive ");
729 if (bp->flow_ctrl & FLOW_CTRL_TX)
730 printk("& transmit ");
733 printk(", transmit ");
735 printk("flow control ON");
740 netif_carrier_off(bp->dev);
741 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
742 bnx2_xceiver_str(bp));
745 bnx2_report_fw_link(bp);
749 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
751 u32 local_adv, remote_adv;
754 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
755 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
757 if (bp->duplex == DUPLEX_FULL) {
758 bp->flow_ctrl = bp->req_flow_ctrl;
763 if (bp->duplex != DUPLEX_FULL) {
767 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
768 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
771 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
772 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
773 bp->flow_ctrl |= FLOW_CTRL_TX;
774 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
775 bp->flow_ctrl |= FLOW_CTRL_RX;
779 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
780 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
782 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
783 u32 new_local_adv = 0;
784 u32 new_remote_adv = 0;
786 if (local_adv & ADVERTISE_1000XPAUSE)
787 new_local_adv |= ADVERTISE_PAUSE_CAP;
788 if (local_adv & ADVERTISE_1000XPSE_ASYM)
789 new_local_adv |= ADVERTISE_PAUSE_ASYM;
790 if (remote_adv & ADVERTISE_1000XPAUSE)
791 new_remote_adv |= ADVERTISE_PAUSE_CAP;
792 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
793 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
795 local_adv = new_local_adv;
796 remote_adv = new_remote_adv;
799 /* See Table 28B-3 of 802.3ab-1999 spec. */
800 if (local_adv & ADVERTISE_PAUSE_CAP) {
801 if (local_adv & ADVERTISE_PAUSE_ASYM) {
802 if (remote_adv & ADVERTISE_PAUSE_CAP) {
803 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
805 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
806 bp->flow_ctrl = FLOW_CTRL_RX;
810 if (remote_adv & ADVERTISE_PAUSE_CAP) {
811 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
815 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
816 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
817 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
819 bp->flow_ctrl = FLOW_CTRL_TX;
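/*
 * Resolution summary of the if-chain above (matches Table 28B-3):
 *
 *   local CAP,      remote CAP       -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local CAP+ASYM, remote ASYM only -> FLOW_CTRL_RX
 *   local ASYM,     remote CAP+ASYM  -> FLOW_CTRL_TX
 *   any other combination            -> no pause frames
 */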
825 bnx2_5709s_linkup(struct bnx2 *bp)
831 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
832 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
833 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
835 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
836 bp->line_speed = bp->req_line_speed;
837 bp->duplex = bp->req_duplex;
840 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
842 case MII_BNX2_GP_TOP_AN_SPEED_10:
843 bp->line_speed = SPEED_10;
845 case MII_BNX2_GP_TOP_AN_SPEED_100:
846 bp->line_speed = SPEED_100;
848 case MII_BNX2_GP_TOP_AN_SPEED_1G:
849 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
850 bp->line_speed = SPEED_1000;
852 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
853 bp->line_speed = SPEED_2500;
856 if (val & MII_BNX2_GP_TOP_AN_FD)
857 bp->duplex = DUPLEX_FULL;
859 bp->duplex = DUPLEX_HALF;
864 bnx2_5708s_linkup(struct bnx2 *bp)
869 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
870 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
871 case BCM5708S_1000X_STAT1_SPEED_10:
872 bp->line_speed = SPEED_10;
874 case BCM5708S_1000X_STAT1_SPEED_100:
875 bp->line_speed = SPEED_100;
877 case BCM5708S_1000X_STAT1_SPEED_1G:
878 bp->line_speed = SPEED_1000;
880 case BCM5708S_1000X_STAT1_SPEED_2G5:
881 bp->line_speed = SPEED_2500;
884 if (val & BCM5708S_1000X_STAT1_FD)
885 bp->duplex = DUPLEX_FULL;
887 bp->duplex = DUPLEX_HALF;
893 bnx2_5706s_linkup(struct bnx2 *bp)
895 u32 bmcr, local_adv, remote_adv, common;
898 bp->line_speed = SPEED_1000;
900 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
908 if (!(bmcr & BMCR_ANENABLE)) {
912 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
913 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
915 common = local_adv & remote_adv;
916 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
918 if (common & ADVERTISE_1000XFULL) {
919 bp->duplex = DUPLEX_FULL;
922 bp->duplex = DUPLEX_HALF;
930 bnx2_copper_linkup(struct bnx2 *bp)
934 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
935 if (bmcr & BMCR_ANENABLE) {
936 u32 local_adv, remote_adv, common;
938 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
939 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
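/* The link partner's 1000BASE-T ability bits in MII_STAT1000 sit two
 * bit positions above the corresponding advertisement bits in
 * MII_CTRL1000, hence the shift before masking below.
 */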
941 common = local_adv & (remote_adv >> 2);
942 if (common & ADVERTISE_1000FULL) {
943 bp->line_speed = SPEED_1000;
944 bp->duplex = DUPLEX_FULL;
946 else if (common & ADVERTISE_1000HALF) {
947 bp->line_speed = SPEED_1000;
948 bp->duplex = DUPLEX_HALF;
951 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
952 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
954 common = local_adv & remote_adv;
955 if (common & ADVERTISE_100FULL) {
956 bp->line_speed = SPEED_100;
957 bp->duplex = DUPLEX_FULL;
959 else if (common & ADVERTISE_100HALF) {
960 bp->line_speed = SPEED_100;
961 bp->duplex = DUPLEX_HALF;
963 else if (common & ADVERTISE_10FULL) {
964 bp->line_speed = SPEED_10;
965 bp->duplex = DUPLEX_FULL;
967 else if (common & ADVERTISE_10HALF) {
968 bp->line_speed = SPEED_10;
969 bp->duplex = DUPLEX_HALF;
978 if (bmcr & BMCR_SPEED100) {
979 bp->line_speed = SPEED_100;
982 bp->line_speed = SPEED_10;
984 if (bmcr & BMCR_FULLDPLX) {
985 bp->duplex = DUPLEX_FULL;
988 bp->duplex = DUPLEX_HALF;
996 bnx2_init_rx_context0(struct bnx2 *bp)
998 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
1000 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1001 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1004 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1005 u32 lo_water, hi_water;
1007 if (bp->flow_ctrl & FLOW_CTRL_TX)
1008 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1010 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1011 if (lo_water >= bp->rx_ring_size)
1014 hi_water = bp->rx_ring_size / 4;
1016 if (hi_water <= lo_water)
1019 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1020 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1024 else if (hi_water == 0)
1026 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1028 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1032 bnx2_set_mac_link(struct bnx2 *bp)
1036 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1037 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1038 (bp->duplex == DUPLEX_HALF)) {
1039 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1042 /* Configure the EMAC mode register. */
1043 val = REG_RD(bp, BNX2_EMAC_MODE);
1045 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1046 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1047 BNX2_EMAC_MODE_25G_MODE);
1050 switch (bp->line_speed) {
1052 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1053 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1058 val |= BNX2_EMAC_MODE_PORT_MII;
1061 val |= BNX2_EMAC_MODE_25G_MODE;
1064 val |= BNX2_EMAC_MODE_PORT_GMII;
1069 val |= BNX2_EMAC_MODE_PORT_GMII;
1072 /* Set the MAC to operate in the appropriate duplex mode. */
1073 if (bp->duplex == DUPLEX_HALF)
1074 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1075 REG_WR(bp, BNX2_EMAC_MODE, val);
1077 /* Enable/disable rx PAUSE. */
1078 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1080 if (bp->flow_ctrl & FLOW_CTRL_RX)
1081 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1082 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1084 /* Enable/disable tx PAUSE. */
1085 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1086 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1088 if (bp->flow_ctrl & FLOW_CTRL_TX)
1089 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1090 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1092 /* Acknowledge the interrupt. */
1093 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1096 bnx2_init_rx_context0(bp);
1102 bnx2_enable_bmsr1(struct bnx2 *bp)
1104 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1105 (CHIP_NUM(bp) == CHIP_NUM_5709))
1106 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1107 MII_BNX2_BLK_ADDR_GP_STATUS);
1111 bnx2_disable_bmsr1(struct bnx2 *bp)
1113 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1114 (CHIP_NUM(bp) == CHIP_NUM_5709))
1115 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1116 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1120 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1125 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1128 if (bp->autoneg & AUTONEG_SPEED)
1129 bp->advertising |= ADVERTISED_2500baseX_Full;
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1132 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1134 bnx2_read_phy(bp, bp->mii_up1, &up1);
1135 if (!(up1 & BCM5708S_UP1_2G5)) {
1136 up1 |= BCM5708S_UP1_2G5;
1137 bnx2_write_phy(bp, bp->mii_up1, up1);
1141 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1142 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1143 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1149 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1154 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1157 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1158 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1160 bnx2_read_phy(bp, bp->mii_up1, &up1);
1161 if (up1 & BCM5708S_UP1_2G5) {
1162 up1 &= ~BCM5708S_UP1_2G5;
1163 bnx2_write_phy(bp, bp->mii_up1, up1);
1167 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1168 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1169 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1175 bnx2_enable_forced_2g5(struct bnx2 *bp)
1179 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1182 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1186 MII_BNX2_BLK_ADDR_SERDES_DIG);
1187 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1188 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1189 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1190 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1193 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1194 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1196 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1197 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1198 bmcr |= BCM5708S_BMCR_FORCE_2500;
1201 if (bp->autoneg & AUTONEG_SPEED) {
1202 bmcr &= ~BMCR_ANENABLE;
1203 if (bp->req_duplex == DUPLEX_FULL)
1204 bmcr |= BMCR_FULLDPLX;
1206 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1210 bnx2_disable_forced_2g5(struct bnx2 *bp)
1214 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1217 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1220 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1221 MII_BNX2_BLK_ADDR_SERDES_DIG);
1222 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1223 val &= ~MII_BNX2_SD_MISC1_FORCE;
1224 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1226 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1227 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1228 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1230 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1231 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1232 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1235 if (bp->autoneg & AUTONEG_SPEED)
1236 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1237 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1241 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1245 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1246 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1248 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1250 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1254 bnx2_set_link(struct bnx2 *bp)
1259 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1264 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1267 link_up = bp->link_up;
1269 bnx2_enable_bmsr1(bp);
1270 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1271 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1272 bnx2_disable_bmsr1(bp);
1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1279 bnx2_5706s_force_link_dn(bp, 0);
1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1282 val = REG_RD(bp, BNX2_EMAC_STATUS);
1284 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1285 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1286 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1288 if ((val & BNX2_EMAC_STATUS_LINK) &&
1289 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1290 bmsr |= BMSR_LSTATUS;
1292 bmsr &= ~BMSR_LSTATUS;
1295 if (bmsr & BMSR_LSTATUS) {
1298 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1299 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1300 bnx2_5706s_linkup(bp);
1301 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1302 bnx2_5708s_linkup(bp);
1303 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1304 bnx2_5709s_linkup(bp);
1307 bnx2_copper_linkup(bp);
1309 bnx2_resolve_flow_ctrl(bp);
1312 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1313 (bp->autoneg & AUTONEG_SPEED))
1314 bnx2_disable_forced_2g5(bp);
1316 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1319 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1320 bmcr |= BMCR_ANENABLE;
1321 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1323 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1328 if (bp->link_up != link_up) {
1329 bnx2_report_link(bp);
1332 bnx2_set_mac_link(bp);
1338 bnx2_reset_phy(struct bnx2 *bp)
1343 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1345 #define PHY_RESET_MAX_WAIT 100
1346 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1349 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1350 if (!(reg & BMCR_RESET)) {
1355 if (i == PHY_RESET_MAX_WAIT) {
1362 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1366 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1367 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1369 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1370 adv = ADVERTISE_1000XPAUSE;
1373 adv = ADVERTISE_PAUSE_CAP;
1376 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1377 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1378 adv = ADVERTISE_1000XPSE_ASYM;
1381 adv = ADVERTISE_PAUSE_ASYM;
1384 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1385 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1386 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1389 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
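/*
 * Mapping summary for the advertisement chosen above
 * (serdes register bits / copper register bits):
 *
 *   RX+TX -> 1000XPAUSE                 / PAUSE_CAP
 *   TX    -> 1000XPSE_ASYM              / PAUSE_ASYM
 *   RX    -> 1000XPAUSE + 1000XPSE_ASYM / PAUSE_CAP + PAUSE_ASYM
 */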
1395 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1398 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1400 u32 speed_arg = 0, pause_adv;
1402 pause_adv = bnx2_phy_get_pause_adv(bp);
1404 if (bp->autoneg & AUTONEG_SPEED) {
1405 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1406 if (bp->advertising & ADVERTISED_10baseT_Half)
1407 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1408 if (bp->advertising & ADVERTISED_10baseT_Full)
1409 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1410 if (bp->advertising & ADVERTISED_100baseT_Half)
1411 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1412 if (bp->advertising & ADVERTISED_100baseT_Full)
1413 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1414 if (bp->advertising & ADVERTISED_1000baseT_Full)
1415 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1416 if (bp->advertising & ADVERTISED_2500baseX_Full)
1417 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1419 if (bp->req_line_speed == SPEED_2500)
1420 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1421 else if (bp->req_line_speed == SPEED_1000)
1422 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1423 else if (bp->req_line_speed == SPEED_100) {
1424 if (bp->req_duplex == DUPLEX_FULL)
1425 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1427 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1428 } else if (bp->req_line_speed == SPEED_10) {
1429 if (bp->req_duplex == DUPLEX_FULL)
1430 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1432 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1436 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1437 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1438 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1439 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1441 if (port == PORT_TP)
1442 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1443 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1445 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1447 spin_unlock_bh(&bp->phy_lock);
1448 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1449 spin_lock_bh(&bp->phy_lock);
1455 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1460 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1461 return (bnx2_setup_remote_phy(bp, port));
1463 if (!(bp->autoneg & AUTONEG_SPEED)) {
1465 int force_link_down = 0;
1467 if (bp->req_line_speed == SPEED_2500) {
1468 if (!bnx2_test_and_enable_2g5(bp))
1469 force_link_down = 1;
1470 } else if (bp->req_line_speed == SPEED_1000) {
1471 if (bnx2_test_and_disable_2g5(bp))
1472 force_link_down = 1;
1474 bnx2_read_phy(bp, bp->mii_adv, &adv);
1475 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1477 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1478 new_bmcr = bmcr & ~BMCR_ANENABLE;
1479 new_bmcr |= BMCR_SPEED1000;
1481 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1482 if (bp->req_line_speed == SPEED_2500)
1483 bnx2_enable_forced_2g5(bp);
1484 else if (bp->req_line_speed == SPEED_1000) {
1485 bnx2_disable_forced_2g5(bp);
1486 new_bmcr &= ~0x2000;
1489 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1490 if (bp->req_line_speed == SPEED_2500)
1491 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1493 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1496 if (bp->req_duplex == DUPLEX_FULL) {
1497 adv |= ADVERTISE_1000XFULL;
1498 new_bmcr |= BMCR_FULLDPLX;
1501 adv |= ADVERTISE_1000XHALF;
1502 new_bmcr &= ~BMCR_FULLDPLX;
1504 if ((new_bmcr != bmcr) || (force_link_down)) {
1505 /* Force a link down that is visible on the other side */
1507 bnx2_write_phy(bp, bp->mii_adv, adv &
1508 ~(ADVERTISE_1000XFULL |
1509 ADVERTISE_1000XHALF));
1510 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1511 BMCR_ANRESTART | BMCR_ANENABLE);
1514 netif_carrier_off(bp->dev);
1515 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1516 bnx2_report_link(bp);
1518 bnx2_write_phy(bp, bp->mii_adv, adv);
1519 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1521 bnx2_resolve_flow_ctrl(bp);
1522 bnx2_set_mac_link(bp);
1527 bnx2_test_and_enable_2g5(bp);
1529 if (bp->advertising & ADVERTISED_1000baseT_Full)
1530 new_adv |= ADVERTISE_1000XFULL;
1532 new_adv |= bnx2_phy_get_pause_adv(bp);
1534 bnx2_read_phy(bp, bp->mii_adv, &adv);
1535 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1537 bp->serdes_an_pending = 0;
1538 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1539 /* Force a link down that is visible on the other side */
1541 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1542 spin_unlock_bh(&bp->phy_lock);
1544 spin_lock_bh(&bp->phy_lock);
1547 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1548 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1550 /* Speed up link-up time when the link partner
1551 * does not autonegotiate, which is very common
1552 * in blade servers. Some blade servers use
1553 * IPMI for keyboard input and it's important
1554 * to minimize link disruptions. Autoneg. involves
1555 * exchanging base pages plus 3 next pages and
1556 * normally completes in about 120 msec.
1558 bp->current_interval = SERDES_AN_TIMEOUT;
1559 bp->serdes_an_pending = 1;
1560 mod_timer(&bp->timer, jiffies + bp->current_interval);
1562 bnx2_resolve_flow_ctrl(bp);
1563 bnx2_set_mac_link(bp);
1569 #define ETHTOOL_ALL_FIBRE_SPEED \
1570 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1571 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1572 (ADVERTISED_1000baseT_Full)
1574 #define ETHTOOL_ALL_COPPER_SPEED \
1575 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1576 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1577 ADVERTISED_1000baseT_Full)
1579 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1580 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1582 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1585 bnx2_set_default_remote_link(struct bnx2 *bp)
1589 if (bp->phy_port == PORT_TP)
1590 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1592 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1594 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1595 bp->req_line_speed = 0;
1596 bp->autoneg |= AUTONEG_SPEED;
1597 bp->advertising = ADVERTISED_Autoneg;
1598 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1599 bp->advertising |= ADVERTISED_10baseT_Half;
1600 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1601 bp->advertising |= ADVERTISED_10baseT_Full;
1602 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1603 bp->advertising |= ADVERTISED_100baseT_Half;
1604 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1605 bp->advertising |= ADVERTISED_100baseT_Full;
1606 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1607 bp->advertising |= ADVERTISED_1000baseT_Full;
1608 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1609 bp->advertising |= ADVERTISED_2500baseX_Full;
1612 bp->advertising = 0;
1613 bp->req_duplex = DUPLEX_FULL;
1614 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1615 bp->req_line_speed = SPEED_10;
1616 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1617 bp->req_duplex = DUPLEX_HALF;
1619 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1620 bp->req_line_speed = SPEED_100;
1621 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1622 bp->req_duplex = DUPLEX_HALF;
1624 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1625 bp->req_line_speed = SPEED_1000;
1626 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1627 bp->req_line_speed = SPEED_2500;
1632 bnx2_set_default_link(struct bnx2 *bp)
1634 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1635 bnx2_set_default_remote_link(bp);
1639 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1640 bp->req_line_speed = 0;
1641 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1646 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1647 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1648 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1650 bp->req_line_speed = bp->line_speed = SPEED_1000;
1651 bp->req_duplex = DUPLEX_FULL;
1654 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1658 bnx2_send_heart_beat(struct bnx2 *bp)
1663 spin_lock(&bp->indirect_lock);
1664 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1665 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1666 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1667 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1668 spin_unlock(&bp->indirect_lock);
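/*
 * This open-codes bnx2_shmem_wr() with plain spin_lock() rather than
 * the spin_lock_bh() used by bnx2_reg_wr_ind(), apparently because the
 * heart beat is only sent from softirq context (the driver timer and
 * the remote-PHY event path), where re-disabling bottom halves is
 * unnecessary.
 */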
1672 bnx2_remote_phy_event(struct bnx2 *bp)
1675 u8 link_up = bp->link_up;
1678 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1680 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1681 bnx2_send_heart_beat(bp);
1683 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1685 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1691 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1692 bp->duplex = DUPLEX_FULL;
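/* The case labels below rely on intentional fall-through: each _HALF
 * case overrides the duplex set above and then falls into the matching
 * _FULL (or _T4) case, which sets the line speed for both.
 */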
1694 case BNX2_LINK_STATUS_10HALF:
1695 bp->duplex = DUPLEX_HALF;
1696 case BNX2_LINK_STATUS_10FULL:
1697 bp->line_speed = SPEED_10;
1699 case BNX2_LINK_STATUS_100HALF:
1700 bp->duplex = DUPLEX_HALF;
1701 case BNX2_LINK_STATUS_100BASE_T4:
1702 case BNX2_LINK_STATUS_100FULL:
1703 bp->line_speed = SPEED_100;
1705 case BNX2_LINK_STATUS_1000HALF:
1706 bp->duplex = DUPLEX_HALF;
1707 case BNX2_LINK_STATUS_1000FULL:
1708 bp->line_speed = SPEED_1000;
1710 case BNX2_LINK_STATUS_2500HALF:
1711 bp->duplex = DUPLEX_HALF;
1712 case BNX2_LINK_STATUS_2500FULL:
1713 bp->line_speed = SPEED_2500;
1721 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1722 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1723 if (bp->duplex == DUPLEX_FULL)
1724 bp->flow_ctrl = bp->req_flow_ctrl;
1726 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1727 bp->flow_ctrl |= FLOW_CTRL_TX;
1728 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1729 bp->flow_ctrl |= FLOW_CTRL_RX;
1732 old_port = bp->phy_port;
1733 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1734 bp->phy_port = PORT_FIBRE;
1736 bp->phy_port = PORT_TP;
1738 if (old_port != bp->phy_port)
1739 bnx2_set_default_link(bp);
1742 if (bp->link_up != link_up)
1743 bnx2_report_link(bp);
1745 bnx2_set_mac_link(bp);
1749 bnx2_set_remote_link(struct bnx2 *bp)
1753 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1755 case BNX2_FW_EVT_CODE_LINK_EVENT:
1756 bnx2_remote_phy_event(bp);
1758 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1760 bnx2_send_heart_beat(bp);
1767 bnx2_setup_copper_phy(struct bnx2 *bp)
1772 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1774 if (bp->autoneg & AUTONEG_SPEED) {
1775 u32 adv_reg, adv1000_reg;
1776 u32 new_adv_reg = 0;
1777 u32 new_adv1000_reg = 0;
1779 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1780 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1781 ADVERTISE_PAUSE_ASYM);
1783 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1784 adv1000_reg &= PHY_ALL_1000_SPEED;
1786 if (bp->advertising & ADVERTISED_10baseT_Half)
1787 new_adv_reg |= ADVERTISE_10HALF;
1788 if (bp->advertising & ADVERTISED_10baseT_Full)
1789 new_adv_reg |= ADVERTISE_10FULL;
1790 if (bp->advertising & ADVERTISED_100baseT_Half)
1791 new_adv_reg |= ADVERTISE_100HALF;
1792 if (bp->advertising & ADVERTISED_100baseT_Full)
1793 new_adv_reg |= ADVERTISE_100FULL;
1794 if (bp->advertising & ADVERTISED_1000baseT_Full)
1795 new_adv1000_reg |= ADVERTISE_1000FULL;
1797 new_adv_reg |= ADVERTISE_CSMA;
1799 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1801 if ((adv1000_reg != new_adv1000_reg) ||
1802 (adv_reg != new_adv_reg) ||
1803 ((bmcr & BMCR_ANENABLE) == 0)) {
1805 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1806 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1807 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1810 else if (bp->link_up) {
1811 /* Flow ctrl may have changed from auto to forced */
1812 /* or vice-versa. */
1814 bnx2_resolve_flow_ctrl(bp);
1815 bnx2_set_mac_link(bp);
1821 if (bp->req_line_speed == SPEED_100) {
1822 new_bmcr |= BMCR_SPEED100;
1824 if (bp->req_duplex == DUPLEX_FULL) {
1825 new_bmcr |= BMCR_FULLDPLX;
1827 if (new_bmcr != bmcr) {
1830 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1831 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1833 if (bmsr & BMSR_LSTATUS) {
1834 /* Force link down */
1835 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1836 spin_unlock_bh(&bp->phy_lock);
1838 spin_lock_bh(&bp->phy_lock);
1840 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1841 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1844 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1846 /* Normally, the new speed is set up after the link has
1847 * gone down and up again. In some cases, the link will not go
1848 * down, so we need to set up the new speed here.
1850 if (bmsr & BMSR_LSTATUS) {
1851 bp->line_speed = bp->req_line_speed;
1852 bp->duplex = bp->req_duplex;
1853 bnx2_resolve_flow_ctrl(bp);
1854 bnx2_set_mac_link(bp);
1857 bnx2_resolve_flow_ctrl(bp);
1858 bnx2_set_mac_link(bp);
1864 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1866 if (bp->loopback == MAC_LOOPBACK)
1869 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1870 return (bnx2_setup_serdes_phy(bp, port));
1873 return (bnx2_setup_copper_phy(bp));
1878 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1882 bp->mii_bmcr = MII_BMCR + 0x10;
1883 bp->mii_bmsr = MII_BMSR + 0x10;
1884 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1885 bp->mii_adv = MII_ADVERTISE + 0x10;
1886 bp->mii_lpa = MII_LPA + 0x10;
1887 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1889 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1890 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1892 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1896 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1898 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1899 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1900 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1901 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1903 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1904 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1905 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
1906 val |= BCM5708S_UP1_2G5;
1908 val &= ~BCM5708S_UP1_2G5;
1909 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1911 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1912 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1913 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1914 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1916 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1918 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1919 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1920 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1922 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1928 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
1935 bp->mii_up1 = BCM5708S_UP1;
1937 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1938 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1939 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1941 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1942 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1943 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1945 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1946 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1947 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1949 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
1950 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1951 val |= BCM5708S_UP1_2G5;
1952 bnx2_write_phy(bp, BCM5708S_UP1, val);
1955 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1956 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1957 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1958 /* increase tx signal amplitude */
1959 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1960 BCM5708S_BLK_ADDR_TX_MISC);
1961 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1962 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1963 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1964 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1967 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
1968 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1973 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
1974 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1975 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1976 BCM5708S_BLK_ADDR_TX_MISC);
1977 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1978 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1979 BCM5708S_BLK_ADDR_DIG);
1986 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
1991 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1993 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1994 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1996 if (bp->dev->mtu > 1500) {
1999 /* Set extended packet length bit */
2000 bnx2_write_phy(bp, 0x18, 0x7);
2001 bnx2_read_phy(bp, 0x18, &val);
2002 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2004 bnx2_write_phy(bp, 0x1c, 0x6c00);
2005 bnx2_read_phy(bp, 0x1c, &val);
2006 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2011 bnx2_write_phy(bp, 0x18, 0x7);
2012 bnx2_read_phy(bp, 0x18, &val);
2013 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2015 bnx2_write_phy(bp, 0x1c, 0x6c00);
2016 bnx2_read_phy(bp, 0x1c, &val);
2017 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2024 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2031 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2032 bnx2_write_phy(bp, 0x18, 0x0c00);
2033 bnx2_write_phy(bp, 0x17, 0x000a);
2034 bnx2_write_phy(bp, 0x15, 0x310b);
2035 bnx2_write_phy(bp, 0x17, 0x201f);
2036 bnx2_write_phy(bp, 0x15, 0x9506);
2037 bnx2_write_phy(bp, 0x17, 0x401f);
2038 bnx2_write_phy(bp, 0x15, 0x14e2);
2039 bnx2_write_phy(bp, 0x18, 0x0400);
2042 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2043 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2044 MII_BNX2_DSP_EXPAND_REG | 0x8);
2045 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2047 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2050 if (bp->dev->mtu > 1500) {
2051 /* Set extended packet length bit */
2052 bnx2_write_phy(bp, 0x18, 0x7);
2053 bnx2_read_phy(bp, 0x18, &val);
2054 bnx2_write_phy(bp, 0x18, val | 0x4000);
2056 bnx2_read_phy(bp, 0x10, &val);
2057 bnx2_write_phy(bp, 0x10, val | 0x1);
2060 bnx2_write_phy(bp, 0x18, 0x7);
2061 bnx2_read_phy(bp, 0x18, &val);
2062 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2064 bnx2_read_phy(bp, 0x10, &val);
2065 bnx2_write_phy(bp, 0x10, val & ~0x1);
2068 /* ethernet@wirespeed */
2069 bnx2_write_phy(bp, 0x18, 0x7007);
2070 bnx2_read_phy(bp, 0x18, &val);
2071 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
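/* Register 0x18 is the Broadcom auxiliary-control shadow window: the
 * 0x7007 write selects the misc-control shadow for read-back, and the
 * rewrite with bit 15 (write enable) and bit 4 set turns on the
 * ethernet@wirespeed downshift feature (bit assignments as commonly
 * documented for Broadcom PHYs).
 */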
2077 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2082 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2083 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2085 bp->mii_bmcr = MII_BMCR;
2086 bp->mii_bmsr = MII_BMSR;
2087 bp->mii_bmsr1 = MII_BMSR;
2088 bp->mii_adv = MII_ADVERTISE;
2089 bp->mii_lpa = MII_LPA;
2091 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2093 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2096 bnx2_read_phy(bp, MII_PHYSID1, &val);
2097 bp->phy_id = val << 16;
2098 bnx2_read_phy(bp, MII_PHYSID2, &val);
2099 bp->phy_id |= val & 0xffff;
2101 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2102 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2103 rc = bnx2_init_5706s_phy(bp, reset_phy);
2104 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2105 rc = bnx2_init_5708s_phy(bp, reset_phy);
2106 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2107 rc = bnx2_init_5709s_phy(bp, reset_phy);
2110 rc = bnx2_init_copper_phy(bp, reset_phy);
2115 rc = bnx2_setup_phy(bp, bp->phy_port);
2121 bnx2_set_mac_loopback(struct bnx2 *bp)
2125 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2126 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2127 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2128 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2133 static int bnx2_test_link(struct bnx2 *);
2136 bnx2_set_phy_loopback(struct bnx2 *bp)
2141 spin_lock_bh(&bp->phy_lock);
2142 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2144 spin_unlock_bh(&bp->phy_lock);
2148 for (i = 0; i < 10; i++) {
2149 if (bnx2_test_link(bp) == 0)
2154 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2155 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2156 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2157 BNX2_EMAC_MODE_25G_MODE);
2159 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2160 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2166 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2172 msg_data |= bp->fw_wr_seq;
2174 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2176 /* wait for an acknowledgement. */
2177 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2180 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2182 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2185 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2188 /* If we timed out, inform the firmware that this is the case. */
2189 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2191 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2194 msg_data &= ~BNX2_DRV_MSG_CODE;
2195 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2197 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2202 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
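/*
 * Handshake summary: the driver posts msg_data (command | sequence) to
 * the BNX2_DRV_MB mailbox and polls BNX2_FW_MB until the firmware
 * echoes the sequence number in its ACK field.  WAIT0-class messages
 * return right after the poll without checking the result; on a
 * timeout the driver rewrites the mailbox with
 * BNX2_DRV_MSG_CODE_FW_TIMEOUT so the firmware knows the driver gave
 * up waiting.
 */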
2209 bnx2_init_5709_context(struct bnx2 *bp)
2214 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2215 val |= (BCM_PAGE_BITS - 8) << 16;
2216 REG_WR(bp, BNX2_CTX_COMMAND, val);
2217 for (i = 0; i < 10; i++) {
2218 val = REG_RD(bp, BNX2_CTX_COMMAND);
2219 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2223 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2226 for (i = 0; i < bp->ctx_pages; i++) {
2230 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2234 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2235 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2236 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2237 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2238 (u64) bp->ctx_blk_mapping[i] >> 32);
2239 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2240 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2241 for (j = 0; j < 10; j++) {
2243 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2244 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2248 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2257 bnx2_init_context(struct bnx2 *bp)
2263 u32 vcid_addr, pcid_addr, offset;
2268 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2271 vcid_addr = GET_PCID_ADDR(vcid);
2273 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2278 pcid_addr = GET_PCID_ADDR(new_vcid);
2281 vcid_addr = GET_CID_ADDR(vcid);
2282 pcid_addr = vcid_addr;
2285 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2286 vcid_addr += (i << PHY_CTX_SHIFT);
2287 pcid_addr += (i << PHY_CTX_SHIFT);
2289 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2290 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2292 /* Zero out the context. */
2293 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2294 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2300 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2306 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2307 if (good_mbuf == NULL) {
2308 printk(KERN_ERR PFX "Failed to allocate memory in "
2309 "bnx2_alloc_bad_rbuf\n");
2313 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2314 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2318 /* Allocate a bunch of mbufs and save the good ones in an array. */
2319 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2320 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2321 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2322 BNX2_RBUF_COMMAND_ALLOC_REQ);
2324 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2326 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2328 /* The addresses with Bit 9 set are bad memory blocks. */
2329 if (!(val & (1 << 9))) {
2330 good_mbuf[good_mbuf_cnt] = (u16) val;
2334 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2337 /* Free the good ones back to the mbuf pool, thus discarding
2338 * all the bad ones. */
2339 while (good_mbuf_cnt) {
2342 val = good_mbuf[good_mbuf_cnt];
2343 val = (val << 9) | val | 1;
2345 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2352 bnx2_set_mac_addr(struct bnx2 *bp)
2355 u8 *mac_addr = bp->dev->dev_addr;
2357 val = (mac_addr[0] << 8) | mac_addr[1];
2359 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2361 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2362 (mac_addr[4] << 8) | mac_addr[5];
2364 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
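/*
 * Example (illustrative address): for dev_addr 00:10:18:2e:4f:60 the
 * two writes above are MATCH0 = 0x00000010 (bytes 0-1) and
 * MATCH1 = 0x182e4f60 (bytes 2-5).
 */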
2368 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2371 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2372 struct rx_bd *rxbd =
2373 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2374 struct page *page = alloc_page(GFP_ATOMIC);
2378 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2379 PCI_DMA_FROMDEVICE);
2381 pci_unmap_addr_set(rx_pg, mapping, mapping);
2382 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2383 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2388 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2390 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2391 struct page *page = rx_pg->page;
2396 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2397 PCI_DMA_FROMDEVICE);
2404 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2406 struct sk_buff *skb;
2407 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2409 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2410 unsigned long align;
2412 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2417 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2418 skb_reserve(skb, BNX2_RX_ALIGN - align);
2420 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2421 PCI_DMA_FROMDEVICE);
2424 pci_unmap_addr_set(rx_buf, mapping, mapping);
2426 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2427 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2429 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
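/* The hi/lo assignment above is the recurring idiom for handing a
 * 64-bit DMA address to the 32-bit descriptor fields; a minimal sketch
 * (illustrative helper, not part of the driver):
 */
static inline void bnx2_set_bd_haddr(struct rx_bd *rxbd, dma_addr_t mapping)
{
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;		/* upper 32 bits */
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;	/* lower 32 bits */
}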
2435 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2437 struct status_block *sblk = bnapi->status_blk;
2438 u32 new_link_state, old_link_state;
2441 new_link_state = sblk->status_attn_bits & event;
2442 old_link_state = sblk->status_attn_bits_ack & event;
2443 if (new_link_state != old_link_state) {
2445 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2447 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2455 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2457 spin_lock(&bp->phy_lock);
2459 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2461 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2462 bnx2_set_remote_link(bp);
2464 spin_unlock(&bp->phy_lock);
2469 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2473 if (bnapi->int_num == 0)
2474 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2476 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2478 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
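/* When the low bits of the consumer index are all ones, the index is
 * sitting on the last entry of a ring page, which holds a chain
 * pointer to the next page rather than a packet descriptor, so the
 * index is advanced past it before use.  bnx2_get_hw_rx_cons() below
 * applies the same rule to the rx ring.
 */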
2484 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2486 u16 hw_cons, sw_cons, sw_ring_cons;
2489 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2490 sw_cons = bnapi->tx_cons;
2492 while (sw_cons != hw_cons) {
2493 struct sw_bd *tx_buf;
2494 struct sk_buff *skb;
2497 sw_ring_cons = TX_RING_IDX(sw_cons);
2499 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2502 /* partial BD completions possible with TSO packets */
2503 if (skb_is_gso(skb)) {
2504 u16 last_idx, last_ring_idx;
2506 last_idx = sw_cons +
2507 skb_shinfo(skb)->nr_frags + 1;
2508 last_ring_idx = sw_ring_cons +
2509 skb_shinfo(skb)->nr_frags + 1;
2510 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2513 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2518 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2519 skb_headlen(skb), PCI_DMA_TODEVICE);
2522 last = skb_shinfo(skb)->nr_frags;
2524 for (i = 0; i < last; i++) {
2525 sw_cons = NEXT_TX_BD(sw_cons);
2527 pci_unmap_page(bp->pdev,
2529 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2531 skb_shinfo(skb)->frags[i].size,
2535 sw_cons = NEXT_TX_BD(sw_cons);
2539 if (tx_pkt == budget)
2542 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2545 bnapi->hw_tx_cons = hw_cons;
2546 bnapi->tx_cons = sw_cons;
2547 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2548 * before checking for netif_queue_stopped(). Without the
2549 * memory barrier, there is a small possibility that bnx2_start_xmit()
2550 * will miss it and cause the queue to be stopped forever.
2551 */
2552 smp_mb();
2554 if (unlikely(netif_queue_stopped(bp->dev)) &&
2555 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2556 netif_tx_lock(bp->dev);
2557 if ((netif_queue_stopped(bp->dev)) &&
2558 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2559 netif_wake_queue(bp->dev);
2560 netif_tx_unlock(bp->dev);
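/* The signed-16-bit subtraction in the TSO check above is the
 * wrap-safe "is index a ahead of index b" test for a 16-bit ring
 * index; an equivalent sketch (illustrative helper, not part of the
 * driver):
 */
static inline int bnx2_tx_idx_after(u16 a, u16 b)
{
	return ((s16) (a - b)) > 0;	/* true if a is ahead of b, mod 64K */
}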
2566 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2567 struct sk_buff *skb, int count)
2569 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2570 struct rx_bd *cons_bd, *prod_bd;
2573 u16 hw_prod = bnapi->rx_pg_prod, prod;
2574 u16 cons = bnapi->rx_pg_cons;
2576 for (i = 0; i < count; i++) {
2577 prod = RX_PG_RING_IDX(hw_prod);
2579 prod_rx_pg = &bp->rx_pg_ring[prod];
2580 cons_rx_pg = &bp->rx_pg_ring[cons];
2581 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2582 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2584 if (i == 0 && skb) {
2586 struct skb_shared_info *shinfo;
2588 shinfo = skb_shinfo(skb);
2590 page = shinfo->frags[shinfo->nr_frags].page;
2591 shinfo->frags[shinfo->nr_frags].page = NULL;
2592 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2593 PCI_DMA_FROMDEVICE);
2594 cons_rx_pg->page = page;
2595 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2599 prod_rx_pg->page = cons_rx_pg->page;
2600 cons_rx_pg->page = NULL;
2601 pci_unmap_addr_set(prod_rx_pg, mapping,
2602 pci_unmap_addr(cons_rx_pg, mapping));
2604 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2605 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2608 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2609 hw_prod = NEXT_RX_BD(hw_prod);
2611 bnapi->rx_pg_prod = hw_prod;
2612 bnapi->rx_pg_cons = cons;
2616 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2619 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2620 struct rx_bd *cons_bd, *prod_bd;
2622 cons_rx_buf = &bp->rx_buf_ring[cons];
2623 prod_rx_buf = &bp->rx_buf_ring[prod];
2625 pci_dma_sync_single_for_device(bp->pdev,
2626 pci_unmap_addr(cons_rx_buf, mapping),
2627 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2629 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2631 prod_rx_buf->skb = skb;
2636 pci_unmap_addr_set(prod_rx_buf, mapping,
2637 pci_unmap_addr(cons_rx_buf, mapping));
2639 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2640 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2641 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2642 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2646 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2647 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2651 u16 prod = ring_idx & 0xffff;
2653 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2654 if (unlikely(err)) {
2655 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2657 unsigned int raw_len = len + 4; /* include the trailing 4-byte CRC still in the buffer */
2658 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2660 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2665 skb_reserve(skb, bp->rx_offset);
2666 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2667 PCI_DMA_FROMDEVICE);
2673 unsigned int i, frag_len, frag_size, pages;
2674 struct sw_pg *rx_pg;
2675 u16 pg_cons = bnapi->rx_pg_cons;
2676 u16 pg_prod = bnapi->rx_pg_prod;
2678 frag_size = len + 4 - hdr_len;
2679 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2680 skb_put(skb, hdr_len);
2682 for (i = 0; i < pages; i++) {
2683 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2684 if (unlikely(frag_len <= 4)) {
2685 unsigned int tail = 4 - frag_len;
2687 bnapi->rx_pg_cons = pg_cons;
2688 bnapi->rx_pg_prod = pg_prod;
2689 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2696 &skb_shinfo(skb)->frags[i - 1];
2698 skb->data_len -= tail;
2699 skb->truesize -= tail;
2703 rx_pg = &bp->rx_pg_ring[pg_cons];
2705 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2706 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2711 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2714 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2715 if (unlikely(err)) {
2716 bnapi->rx_pg_cons = pg_cons;
2717 bnapi->rx_pg_prod = pg_prod;
2718 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2723 frag_size -= frag_len;
2724 skb->data_len += frag_len;
2725 skb->truesize += frag_len;
2726 skb->len += frag_len;
2728 pg_prod = NEXT_RX_BD(pg_prod);
2729 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2731 bnapi->rx_pg_prod = pg_prod;
2732 bnapi->rx_pg_cons = pg_cons;
2738 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2740 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2742 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2748 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2750 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2751 struct l2_fhdr *rx_hdr;
2752 int rx_pkt = 0, pg_ring_used = 0;
2754 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2755 sw_cons = bnapi->rx_cons;
2756 sw_prod = bnapi->rx_prod;
2758 /* Memory barrier necessary as speculative reads of the rx
2759 * buffer can be ahead of the index in the status block
2760 */
2761 rmb();
2762 while (sw_cons != hw_cons) {
2763 unsigned int len, hdr_len;
2765 struct sw_bd *rx_buf;
2766 struct sk_buff *skb;
2767 dma_addr_t dma_addr;
2769 sw_ring_cons = RX_RING_IDX(sw_cons);
2770 sw_ring_prod = RX_RING_IDX(sw_prod);
2772 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2777 dma_addr = pci_unmap_addr(rx_buf, mapping);
2779 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2780 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2782 rx_hdr = (struct l2_fhdr *) skb->data;
2783 len = rx_hdr->l2_fhdr_pkt_len;
2785 if ((status = rx_hdr->l2_fhdr_status) &
2786 (L2_FHDR_ERRORS_BAD_CRC |
2787 L2_FHDR_ERRORS_PHY_DECODE |
2788 L2_FHDR_ERRORS_ALIGNMENT |
2789 L2_FHDR_ERRORS_TOO_SHORT |
2790 L2_FHDR_ERRORS_GIANT_FRAME)) {
2792 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2797 if (status & L2_FHDR_STATUS_SPLIT) {
2798 hdr_len = rx_hdr->l2_fhdr_ip_xsum; /* header length is passed in the ip_xsum field for split frames */
2800 } else if (len > bp->rx_jumbo_thresh) {
2801 hdr_len = bp->rx_jumbo_thresh;
2807 if (len <= bp->rx_copy_thresh) {
2808 struct sk_buff *new_skb;
2810 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2811 if (new_skb == NULL) {
2812 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2818 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2819 new_skb->data, len + 2);
2820 skb_reserve(new_skb, 2);
2821 skb_put(new_skb, len);
2823 bnx2_reuse_rx_skb(bp, bnapi, skb,
2824 sw_ring_cons, sw_ring_prod);
2827 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2828 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2831 skb->protocol = eth_type_trans(skb, bp->dev);
2833 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2834 (ntohs(skb->protocol) != 0x8100)) { /* 0x8100: 802.1Q VLAN ethertype */
2841 skb->ip_summed = CHECKSUM_NONE;
2843 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2844 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2846 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2847 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2848 skb->ip_summed = CHECKSUM_UNNECESSARY;
2852 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2853 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2854 rx_hdr->l2_fhdr_vlan_tag);
2858 netif_receive_skb(skb);
2860 bp->dev->last_rx = jiffies;
2864 sw_cons = NEXT_RX_BD(sw_cons);
2865 sw_prod = NEXT_RX_BD(sw_prod);
2867 if (rx_pkt == budget)
2870 /* Refresh hw_cons to see if there is new work */
2871 if (sw_cons == hw_cons) {
2872 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2876 bnapi->rx_cons = sw_cons;
2877 bnapi->rx_prod = sw_prod;
2880 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2883 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2885 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
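/* The error filtering above tests l2_fhdr_status against a fixed mask;
 * an equivalent sketch (illustrative helper, not part of the driver):
 */
static inline int bnx2_rx_frame_bad(u32 status)
{
	return (status & (L2_FHDR_ERRORS_BAD_CRC |
			  L2_FHDR_ERRORS_PHY_DECODE |
			  L2_FHDR_ERRORS_ALIGNMENT |
			  L2_FHDR_ERRORS_TOO_SHORT |
			  L2_FHDR_ERRORS_GIANT_FRAME)) != 0;
}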
2893 /* MSI ISR - The only difference between this and the INTx ISR
2894 * is that the MSI interrupt is always serviced.
2897 bnx2_msi(int irq, void *dev_instance)
2899 struct net_device *dev = dev_instance;
2900 struct bnx2 *bp = netdev_priv(dev);
2901 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2903 prefetch(bnapi->status_blk);
2904 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2905 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2906 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2908 /* Return here if interrupt is disabled. */
2909 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2912 netif_rx_schedule(dev, &bnapi->napi);
2918 bnx2_msi_1shot(int irq, void *dev_instance)
2920 struct net_device *dev = dev_instance;
2921 struct bnx2 *bp = netdev_priv(dev);
2922 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2924 prefetch(bnapi->status_blk);
2926 /* Return here if interrupt is disabled. */
2927 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2930 netif_rx_schedule(dev, &bnapi->napi);
2936 bnx2_interrupt(int irq, void *dev_instance)
2938 struct net_device *dev = dev_instance;
2939 struct bnx2 *bp = netdev_priv(dev);
2940 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2941 struct status_block *sblk = bnapi->status_blk;
2943 /* When using INTx, it is possible for the interrupt to arrive
2944 * at the CPU before the status block posted prior to the
2945 * interrupt. Reading a register will flush the status block.
2946 * When using MSI, the MSI message will always complete after
2947 * the status block write.
2949 if ((sblk->status_idx == bnapi->last_status_idx) &&
2950 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2951 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2954 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2955 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2956 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2958 /* Read back to deassert IRQ immediately to avoid too many
2959 * spurious interrupts.
2961 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2963 /* Return here if interrupt is shared and is disabled. */
2964 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2967 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2968 bnapi->last_status_idx = sblk->status_idx;
2969 __netif_rx_schedule(dev, &bnapi->napi);
2976 bnx2_tx_msix(int irq, void *dev_instance)
2978 struct net_device *dev = dev_instance;
2979 struct bnx2 *bp = netdev_priv(dev);
2980 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2982 prefetch(bnapi->status_blk_msix);
2984 /* Return here if interrupt is disabled. */
2985 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2988 netif_rx_schedule(dev, &bnapi->napi);
2992 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2993 STATUS_ATTN_BITS_TIMER_ABORT)
2996 bnx2_has_work(struct bnx2_napi *bnapi)
2998 struct status_block *sblk = bnapi->status_blk;
3000 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
3001 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
3004 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3005 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
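/* The attention test above compares the latched bits against their
 * acknowledged copy; the two differ exactly when an event is pending.
 * An equivalent XOR formulation (illustrative helper, not part of the
 * driver):
 */
static inline int bnx2_attn_pending(struct status_block *sblk)
{
	return ((sblk->status_attn_bits ^ sblk->status_attn_bits_ack) &
		STATUS_ATTN_EVENTS) != 0;
}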
3011 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3013 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3014 struct bnx2 *bp = bnapi->bp;
3016 struct status_block_msix *sblk = bnapi->status_blk_msix;
3019 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3020 if (unlikely(work_done >= budget))
3023 bnapi->last_status_idx = sblk->status_idx;
3025 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
3027 netif_rx_complete(bp->dev, napi);
3028 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3029 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3030 bnapi->last_status_idx);
3034 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3035 int work_done, int budget)
3037 struct status_block *sblk = bnapi->status_blk;
3038 u32 status_attn_bits = sblk->status_attn_bits;
3039 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3041 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3042 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3044 bnx2_phy_int(bp, bnapi);
3046 /* This is needed to take care of transient status
3047 * during link changes.
3049 REG_WR(bp, BNX2_HC_COMMAND,
3050 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3051 REG_RD(bp, BNX2_HC_COMMAND);
3054 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
3055 bnx2_tx_int(bp, bnapi, 0);
3057 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
3058 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3063 static int bnx2_poll(struct napi_struct *napi, int budget)
3065 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3066 struct bnx2 *bp = bnapi->bp;
3068 struct status_block *sblk = bnapi->status_blk;
3071 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3073 if (unlikely(work_done >= budget))
3076 /* bnapi->last_status_idx is used below to tell the hw how
3077 * much work has been processed, so we must read it before
3078 * checking for more work.
3080 bnapi->last_status_idx = sblk->status_idx;
3082 if (likely(!bnx2_has_work(bnapi))) {
3083 netif_rx_complete(bp->dev, napi);
3084 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3085 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3086 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3087 bnapi->last_status_idx);
3090 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3091 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3092 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3093 bnapi->last_status_idx);
3095 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3096 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3097 bnapi->last_status_idx);
3105 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3106 * from set_multicast.
3109 bnx2_set_rx_mode(struct net_device *dev)
3111 struct bnx2 *bp = netdev_priv(dev);
3112 u32 rx_mode, sort_mode;
3115 spin_lock_bh(&bp->phy_lock);
3117 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3118 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3119 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3121 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3122 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3124 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3125 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3127 if (dev->flags & IFF_PROMISC) {
3128 /* Promiscuous mode. */
3129 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3130 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3131 BNX2_RPM_SORT_USER0_PROM_VLAN;
3133 else if (dev->flags & IFF_ALLMULTI) {
3134 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3135 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3138 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3141 /* Accept one or more multicast(s). */
3142 struct dev_mc_list *mclist;
3143 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3148 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3150 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3151 i++, mclist = mclist->next) {
3153 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3154 bit = crc & 0xff;
3155 regidx = (bit & 0xe0) >> 5;
3156 bit &= 0x1f;
3157 mc_filter[regidx] |= (1 << bit);
3160 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3161 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3165 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3168 if (rx_mode != bp->rx_mode) {
3169 bp->rx_mode = rx_mode;
3170 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3173 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3174 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3175 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3177 spin_unlock_bh(&bp->phy_lock);
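/* Sketch of the multicast filter placement used above (illustrative
 * helper, not part of the driver): the low byte of the little-endian
 * CRC selects one of 256 filter bits; its top three bits pick one of
 * the eight 32-bit hash registers, the low five the bit within it.
 */
static inline void bnx2_mc_hash_pos(u32 crc, int *regidx, int *bit)
{
	u32 b = crc & 0xff;

	*regidx = (b & 0xe0) >> 5;
	*bit = b & 0x1f;
}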
3181 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3187 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3188 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3189 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3190 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3191 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3194 for (i = 0; i < rv2p_code_len; i += 8) {
3195 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3197 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3200 if (rv2p_proc == RV2P_PROC1) {
3201 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3202 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3205 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3206 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3210 /* Reset the processor; the un-stall is done later. */
3211 if (rv2p_proc == RV2P_PROC1) {
3212 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3215 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3220 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3227 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3228 val |= cpu_reg->mode_value_halt;
3229 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3230 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3232 /* Load the Text area. */
3233 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3237 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3242 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3243 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3247 /* Load the Data area. */
3248 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3252 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3253 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3257 /* Load the SBSS area. */
3258 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3262 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3263 bnx2_reg_wr_ind(bp, offset, 0);
3267 /* Load the BSS area. */
3268 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3272 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3273 bnx2_reg_wr_ind(bp, offset, 0);
3277 /* Load the Read-Only area. */
3278 offset = cpu_reg->spad_base +
3279 (fw->rodata_addr - cpu_reg->mips_view_base);
3283 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3284 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3288 /* Clear the pre-fetch instruction. */
3289 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3290 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3292 /* Start the CPU. */
3293 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3294 val &= ~cpu_reg->mode_value_halt;
3295 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3296 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
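/* Each load above converts a MIPS virtual address from the firmware
 * image into an offset in the processor's scratchpad window; a minimal
 * sketch of that translation (illustrative helper, not part of the
 * driver):
 */
static inline u32 bnx2_fw_offset(struct cpu_reg *cpu_reg, u32 fw_addr)
{
	return cpu_reg->spad_base + (fw_addr - cpu_reg->mips_view_base);
}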
3302 bnx2_init_cpus(struct bnx2 *bp)
3304 struct cpu_reg cpu_reg;
3309 /* Initialize the RV2P processor. */
3310 text = vmalloc(FW_BUF_SIZE);
3313 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3314 rv2p = bnx2_xi_rv2p_proc1;
3315 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3317 rv2p = bnx2_rv2p_proc1;
3318 rv2p_len = sizeof(bnx2_rv2p_proc1);
3320 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3324 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3326 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3327 rv2p = bnx2_xi_rv2p_proc2;
3328 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3330 rv2p = bnx2_rv2p_proc2;
3331 rv2p_len = sizeof(bnx2_rv2p_proc2);
3333 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3337 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3339 /* Initialize the RX Processor. */
3340 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3341 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3342 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3343 cpu_reg.state = BNX2_RXP_CPU_STATE;
3344 cpu_reg.state_value_clear = 0xffffff;
3345 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3346 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3347 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3348 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3349 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3350 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3351 cpu_reg.mips_view_base = 0x8000000;
3353 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3354 fw = &bnx2_rxp_fw_09;
3356 fw = &bnx2_rxp_fw_06;
3359 rc = load_cpu_fw(bp, &cpu_reg, fw);
3363 /* Initialize the TX Processor. */
3364 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3365 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3366 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3367 cpu_reg.state = BNX2_TXP_CPU_STATE;
3368 cpu_reg.state_value_clear = 0xffffff;
3369 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3370 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3371 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3372 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3373 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3374 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3375 cpu_reg.mips_view_base = 0x8000000;
3377 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3378 fw = &bnx2_txp_fw_09;
3380 fw = &bnx2_txp_fw_06;
3383 rc = load_cpu_fw(bp, &cpu_reg, fw);
3387 /* Initialize the TX Patch-up Processor. */
3388 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3389 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3390 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3391 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3392 cpu_reg.state_value_clear = 0xffffff;
3393 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3394 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3395 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3396 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3397 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3398 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3399 cpu_reg.mips_view_base = 0x8000000;
3401 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3402 fw = &bnx2_tpat_fw_09;
3404 fw = &bnx2_tpat_fw_06;
3407 rc = load_cpu_fw(bp, &cpu_reg, fw);
3411 /* Initialize the Completion Processor. */
3412 cpu_reg.mode = BNX2_COM_CPU_MODE;
3413 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3414 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3415 cpu_reg.state = BNX2_COM_CPU_STATE;
3416 cpu_reg.state_value_clear = 0xffffff;
3417 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3418 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3419 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3420 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3421 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3422 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3423 cpu_reg.mips_view_base = 0x8000000;
3425 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3426 fw = &bnx2_com_fw_09;
3428 fw = &bnx2_com_fw_06;
3431 rc = load_cpu_fw(bp, &cpu_reg, fw);
3435 /* Initialize the Command Processor. */
3436 cpu_reg.mode = BNX2_CP_CPU_MODE;
3437 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3438 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3439 cpu_reg.state = BNX2_CP_CPU_STATE;
3440 cpu_reg.state_value_clear = 0xffffff;
3441 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3442 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3443 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3444 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3445 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3446 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3447 cpu_reg.mips_view_base = 0x8000000;
3449 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3450 fw = &bnx2_cp_fw_09;
3452 fw = &bnx2_cp_fw_06;
3455 rc = load_cpu_fw(bp, &cpu_reg, fw);
3463 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3467 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3473 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3474 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3475 PCI_PM_CTRL_PME_STATUS);
3477 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3478 /* delay required during transition out of D3hot */
3481 val = REG_RD(bp, BNX2_EMAC_MODE);
3482 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3483 val &= ~BNX2_EMAC_MODE_MPKT;
3484 REG_WR(bp, BNX2_EMAC_MODE, val);
3486 val = REG_RD(bp, BNX2_RPM_CONFIG);
3487 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3488 REG_WR(bp, BNX2_RPM_CONFIG, val);
3499 autoneg = bp->autoneg;
3500 advertising = bp->advertising;
3502 if (bp->phy_port == PORT_TP) {
3503 bp->autoneg = AUTONEG_SPEED;
3504 bp->advertising = ADVERTISED_10baseT_Half |
3505 ADVERTISED_10baseT_Full |
3506 ADVERTISED_100baseT_Half |
3507 ADVERTISED_100baseT_Full |
3511 spin_lock_bh(&bp->phy_lock);
3512 bnx2_setup_phy(bp, bp->phy_port);
3513 spin_unlock_bh(&bp->phy_lock);
3515 bp->autoneg = autoneg;
3516 bp->advertising = advertising;
3518 bnx2_set_mac_addr(bp);
3520 val = REG_RD(bp, BNX2_EMAC_MODE);
3522 /* Enable port mode. */
3523 val &= ~BNX2_EMAC_MODE_PORT;
3524 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3525 BNX2_EMAC_MODE_ACPI_RCVD |
3526 BNX2_EMAC_MODE_MPKT;
3527 if (bp->phy_port == PORT_TP)
3528 val |= BNX2_EMAC_MODE_PORT_MII;
3530 val |= BNX2_EMAC_MODE_PORT_GMII;
3531 if (bp->line_speed == SPEED_2500)
3532 val |= BNX2_EMAC_MODE_25G_MODE;
3535 REG_WR(bp, BNX2_EMAC_MODE, val);
3537 /* receive all multicast */
3538 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3539 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3542 REG_WR(bp, BNX2_EMAC_RX_MODE,
3543 BNX2_EMAC_RX_MODE_SORT_MODE);
3545 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3546 BNX2_RPM_SORT_USER0_MC_EN;
3547 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3548 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3549 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3550 BNX2_RPM_SORT_USER0_ENA);
3552 /* Need to enable EMAC and RPM for WOL. */
3553 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3554 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3555 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3556 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3558 val = REG_RD(bp, BNX2_RPM_CONFIG);
3559 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3560 REG_WR(bp, BNX2_RPM_CONFIG, val);
3562 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3565 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3568 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3569 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3571 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3572 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3573 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3582 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3584 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3587 /* No more memory access after this point until
3588 * device is brought back to D0.
3600 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3605 /* Request access to the flash interface. */
3606 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3607 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3608 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3609 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3615 if (j >= NVRAM_TIMEOUT_COUNT)
3622 bnx2_release_nvram_lock(struct bnx2 *bp)
3627 /* Relinquish nvram interface. */
3628 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3630 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3631 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3632 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3638 if (j >= NVRAM_TIMEOUT_COUNT)
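/* Acquire and release share a poll-with-timeout idiom on the same
 * arbitration register; a hedged sketch of the common pattern
 * (illustrative helper, not part of the driver; assumes a short
 * udelay between polls):
 */
static int bnx2_poll_nvram_arb(struct bnx2 *bp, u32 bit, int set)
{
	int j;

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val = REG_RD(bp, BNX2_NVM_SW_ARB);

		if (!!(val & bit) == set)
			return 0;
		udelay(5);
	}
	return -EBUSY;
}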
3646 bnx2_enable_nvram_write(struct bnx2 *bp)
3650 val = REG_RD(bp, BNX2_MISC_CFG);
3651 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3653 if (bp->flash_info->flags & BNX2_NV_WREN) {
3656 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3657 REG_WR(bp, BNX2_NVM_COMMAND,
3658 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3660 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3663 val = REG_RD(bp, BNX2_NVM_COMMAND);
3664 if (val & BNX2_NVM_COMMAND_DONE)
3668 if (j >= NVRAM_TIMEOUT_COUNT)
3675 bnx2_disable_nvram_write(struct bnx2 *bp)
3679 val = REG_RD(bp, BNX2_MISC_CFG);
3680 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3685 bnx2_enable_nvram_access(struct bnx2 *bp)
3689 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3690 /* Enable both bits, even on read. */
3691 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3692 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3696 bnx2_disable_nvram_access(struct bnx2 *bp)
3700 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3701 /* Disable both bits, even after read. */
3702 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3703 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3704 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3708 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3713 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3714 /* Buffered flash, no erase needed */
3717 /* Build an erase command */
3718 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3719 BNX2_NVM_COMMAND_DOIT;
3721 /* Need to clear DONE bit separately. */
3722 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3724 /* Address of the NVRAM page to erase. */
3725 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3727 /* Issue an erase command. */
3728 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3730 /* Wait for completion. */
3731 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3736 val = REG_RD(bp, BNX2_NVM_COMMAND);
3737 if (val & BNX2_NVM_COMMAND_DONE)
3741 if (j >= NVRAM_TIMEOUT_COUNT)
3748 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3753 /* Build the command word. */
3754 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3756 /* Calculate the translated offset for buffered flash; not needed on the 5709. */
3757 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3758 offset = ((offset / bp->flash_info->page_size) <<
3759 bp->flash_info->page_bits) +
3760 (offset % bp->flash_info->page_size);
3763 /* Need to clear DONE bit separately. */
3764 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3766 /* Address of the NVRAM to read from. */
3767 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3769 /* Issue a read command. */
3770 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3772 /* Wait for completion. */
3773 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3778 val = REG_RD(bp, BNX2_NVM_COMMAND);
3779 if (val & BNX2_NVM_COMMAND_DONE) {
3780 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3781 memcpy(ret_val, &v, 4);
3785 if (j >= NVRAM_TIMEOUT_COUNT)
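/* Sketch of the buffered-flash address translation used above
 * (illustrative helper, not part of the driver): the linear offset is
 * split into a page number shifted into the page_bits field plus the
 * byte offset within that page.
 */
static inline u32 bnx2_buffered_flash_offset(struct flash_spec *flash,
					     u32 offset)
{
	return ((offset / flash->page_size) << flash->page_bits) +
	       (offset % flash->page_size);
}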
3793 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3799 /* Build the command word. */
3800 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3802 /* Calculate the translated offset for buffered flash; not needed on the 5709. */
3803 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3804 offset = ((offset / bp->flash_info->page_size) <<
3805 bp->flash_info->page_bits) +
3806 (offset % bp->flash_info->page_size);
3809 /* Need to clear DONE bit separately. */
3810 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3812 memcpy(&val32, val, 4);
3814 /* Write the data. */
3815 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3817 /* Address of the NVRAM to write to. */
3818 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3820 /* Issue the write command. */
3821 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3823 /* Wait for completion. */
3824 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3827 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3830 if (j >= NVRAM_TIMEOUT_COUNT)
3837 bnx2_init_nvram(struct bnx2 *bp)
3840 int j, entry_count, rc = 0;
3841 struct flash_spec *flash;
3843 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3844 bp->flash_info = &flash_5709;
3845 goto get_flash_size;
3848 /* Determine the selected interface. */
3849 val = REG_RD(bp, BNX2_NVM_CFG1);
3851 entry_count = ARRAY_SIZE(flash_table);
3853 if (val & 0x40000000) {
3855 /* Flash interface has been reconfigured */
3856 for (j = 0, flash = &flash_table[0]; j < entry_count;
3858 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3859 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3860 bp->flash_info = flash;
3867 /* Flash interface has not yet been reconfigured */
3869 if (val & (1 << 23))
3870 mask = FLASH_BACKUP_STRAP_MASK;
3872 mask = FLASH_STRAP_MASK;
3874 for (j = 0, flash = &flash_table[0]; j < entry_count;
3877 if ((val & mask) == (flash->strapping & mask)) {
3878 bp->flash_info = flash;
3880 /* Request access to the flash interface. */
3881 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3884 /* Enable access to flash interface */
3885 bnx2_enable_nvram_access(bp);
3887 /* Reconfigure the flash interface */
3888 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3889 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3890 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3891 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3893 /* Disable access to flash interface */
3894 bnx2_disable_nvram_access(bp);
3895 bnx2_release_nvram_lock(bp);
3900 } /* if (val & 0x40000000) */
3902 if (j == entry_count) {
3903 bp->flash_info = NULL;
3904 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3909 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3910 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3912 bp->flash_size = val;
3914 bp->flash_size = bp->flash_info->total_size;
3920 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3924 u32 cmd_flags, offset32, len32, extra;
3929 /* Request access to the flash interface. */
3930 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3933 /* Enable access to flash interface */
3934 bnx2_enable_nvram_access(bp);
3947 pre_len = 4 - (offset & 3);
3949 if (pre_len >= len32) {
3951 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3952 BNX2_NVM_COMMAND_LAST;
3955 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3958 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3963 memcpy(ret_buf, buf + (offset & 3), pre_len);
3970 extra = 4 - (len32 & 3);
3971 len32 = (len32 + 4) & ~3;
3978 cmd_flags = BNX2_NVM_COMMAND_LAST;
3980 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3981 BNX2_NVM_COMMAND_LAST;
3983 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3985 memcpy(ret_buf, buf, 4 - extra);
3987 else if (len32 > 0) {
3990 /* Read the first word. */
3994 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3996 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3998 /* Advance to the next dword. */
4003 while (len32 > 4 && rc == 0) {
4004 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4006 /* Advance to the next dword. */
4015 cmd_flags = BNX2_NVM_COMMAND_LAST;
4016 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4018 memcpy(ret_buf, buf, 4 - extra);
4021 /* Disable access to flash interface */
4022 bnx2_disable_nvram_access(bp);
4024 bnx2_release_nvram_lock(bp);
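/* Worked example of the alignment handling above: reading 6 bytes at
 * offset 5 first fetches the dword at offset 4 and keeps its last
 * pre_len = 4 - (5 & 3) = 3 bytes; the remaining 3 bytes are rounded
 * up to one more dword read, of which only 4 - extra = 3 bytes are
 * copied out.  Every NVRAM access therefore stays dword-aligned.
 */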
4030 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4033 u32 written, offset32, len32;
4034 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4036 int align_start, align_end;
4041 align_start = align_end = 0;
4043 if ((align_start = (offset32 & 3))) {
4045 len32 += align_start;
4048 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4053 align_end = 4 - (len32 & 3);
4055 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4059 if (align_start || align_end) {
4060 align_buf = kmalloc(len32, GFP_KERNEL);
4061 if (align_buf == NULL)
4064 memcpy(align_buf, start, 4);
4067 memcpy(align_buf + len32 - 4, end, 4);
4069 memcpy(align_buf + align_start, data_buf, buf_size);
4073 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4074 flash_buffer = kmalloc(264, GFP_KERNEL);
4075 if (flash_buffer == NULL) {
4077 goto nvram_write_end;
4082 while ((written < len32) && (rc == 0)) {
4083 u32 page_start, page_end, data_start, data_end;
4084 u32 addr, cmd_flags;
4087 /* Find the page_start addr */
4088 page_start = offset32 + written;
4089 page_start -= (page_start % bp->flash_info->page_size);
4090 /* Find the page_end addr */
4091 page_end = page_start + bp->flash_info->page_size;
4092 /* Find the data_start addr */
4093 data_start = (written == 0) ? offset32 : page_start;
4094 /* Find the data_end addr */
4095 data_end = (page_end > offset32 + len32) ?
4096 (offset32 + len32) : page_end;
4098 /* Request access to the flash interface. */
4099 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4100 goto nvram_write_end;
4102 /* Enable access to flash interface */
4103 bnx2_enable_nvram_access(bp);
4105 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4106 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4109 /* Read the whole page into the buffer
4110 * (non-buffered flash only) */
4111 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4112 if (j == (bp->flash_info->page_size - 4)) {
4113 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4115 rc = bnx2_nvram_read_dword(bp,
4121 goto nvram_write_end;
4127 /* Enable writes to flash interface (unlock write-protect) */
4128 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4129 goto nvram_write_end;
4131 /* Loop to write back the buffer data from page_start to
4134 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4135 /* Erase the page */
4136 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4137 goto nvram_write_end;
4139 /* Re-enable writes for the actual programming */
4140 bnx2_enable_nvram_write(bp);
4142 for (addr = page_start; addr < data_start;
4143 addr += 4, i += 4) {
4145 rc = bnx2_nvram_write_dword(bp, addr,
4146 &flash_buffer[i], cmd_flags);
4149 goto nvram_write_end;
4155 /* Loop to write the new data from data_start to data_end */
4156 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4157 if ((addr == page_end - 4) ||
4158 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4159 (addr == data_end - 4))) {
4161 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4163 rc = bnx2_nvram_write_dword(bp, addr, buf,
4167 goto nvram_write_end;
4173 /* Loop to write back the buffer data from data_end
4175 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4176 for (addr = data_end; addr < page_end;
4177 addr += 4, i += 4) {
4179 if (addr == page_end-4) {
4180 cmd_flags = BNX2_NVM_COMMAND_LAST;
4182 rc = bnx2_nvram_write_dword(bp, addr,
4183 &flash_buffer[i], cmd_flags);
4186 goto nvram_write_end;
4192 /* Disable writes to flash interface (lock write-protect) */
4193 bnx2_disable_nvram_write(bp);
4195 /* Disable access to flash interface */
4196 bnx2_disable_nvram_access(bp);
4197 bnx2_release_nvram_lock(bp);
4199 /* Account for the bytes written in this page */
4200 written += data_end - data_start;
4204 kfree(flash_buffer);
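/* Worked example of the page windowing above, assuming a hypothetical
 * 256-byte page size: writing 16 bytes at offset 0x1f8 spans two
 * pages.  First pass: page_start = 0x100, page_end = 0x200,
 * data_start = 0x1f8, data_end = 0x200, so 8 bytes are programmed
 * (after the read-back and erase on non-buffered parts).  Second
 * pass: page_start = data_start = 0x200, data_end = 0x208 for the
 * remaining 8 bytes.
 */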
4210 bnx2_init_remote_phy(struct bnx2 *bp)
4214 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4215 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4218 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4219 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4222 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4223 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4225 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4226 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4227 bp->phy_port = PORT_FIBRE;
4229 bp->phy_port = PORT_TP;
4231 if (netif_running(bp->dev)) {
4234 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4235 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4236 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4242 bnx2_setup_msix_tbl(struct bnx2 *bp)
4244 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4246 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4247 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4251 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4257 /* Wait for the current PCI transaction to complete before
4258 * issuing a reset. */
4259 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4260 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4261 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4262 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4263 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4264 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4267 /* Wait for the firmware to tell us it is ok to issue a reset. */
4268 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4270 /* Deposit a driver reset signature so the firmware knows that
4271 * this is a soft reset. */
4272 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4273 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4275 /* Do a dummy read to force the chip to complete all outstanding
4276 * transactions before we issue a reset. */
4277 val = REG_RD(bp, BNX2_MISC_ID);
4279 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4280 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4281 REG_RD(bp, BNX2_MISC_COMMAND);
4284 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4285 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4287 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4290 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4291 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4292 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4295 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4297 /* Reading back any register after chip reset will hang the
4298 * bus on 5706 A0 and A1. The msleep below provides plenty
4299 * of margin for write posting.
4301 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4302 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4305 /* Reset takes approximately 30 usec */
4306 for (i = 0; i < 10; i++) {
4307 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4308 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4309 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4314 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4315 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4316 printk(KERN_ERR PFX "Chip reset did not complete\n");
4321 /* Make sure byte swapping is properly configured. */
4322 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4323 if (val != 0x01020304) {
4324 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4328 /* Wait for the firmware to finish its initialization. */
4329 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4333 spin_lock_bh(&bp->phy_lock);
4334 old_port = bp->phy_port;
4335 bnx2_init_remote_phy(bp);
4336 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4337 old_port != bp->phy_port)
4338 bnx2_set_default_remote_link(bp);
4339 spin_unlock_bh(&bp->phy_lock);
4341 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4342 /* Adjust the voltage regulator two steps lower. The default
4343 * of this register is 0x0000000e. */
4344 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4346 /* Remove bad rbuf memory from the free pool. */
4347 rc = bnx2_alloc_bad_rbuf(bp);
4350 if (bp->flags & BNX2_FLAG_USING_MSIX)
4351 bnx2_setup_msix_tbl(bp);
4357 bnx2_init_chip(struct bnx2 *bp)
4362 /* Make sure the interrupt is not active. */
4363 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4365 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4366 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4368 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4370 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4371 DMA_READ_CHANS << 12 |
4372 DMA_WRITE_CHANS << 16;
4374 val |= (0x2 << 20) | (1 << 11);
4376 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4379 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4380 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4381 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4383 REG_WR(bp, BNX2_DMA_CONFIG, val);
4385 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4386 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4387 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4388 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4391 if (bp->flags & BNX2_FLAG_PCIX) {
4394 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4396 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4397 val16 & ~PCI_X_CMD_ERO);
4400 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4401 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4402 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4403 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4405 /* Initialize context mapping and zero out the quick contexts. The
4406 * context block must have already been enabled. */
4407 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4408 rc = bnx2_init_5709_context(bp);
4412 bnx2_init_context(bp);
4414 if ((rc = bnx2_init_cpus(bp)) != 0)
4417 bnx2_init_nvram(bp);
4419 bnx2_set_mac_addr(bp);
4421 val = REG_RD(bp, BNX2_MQ_CONFIG);
4422 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4423 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4424 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4425 val |= BNX2_MQ_CONFIG_HALT_DIS;
4427 REG_WR(bp, BNX2_MQ_CONFIG, val);
4429 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4430 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4431 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4433 val = (BCM_PAGE_BITS - 8) << 24;
4434 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4436 /* Configure page size. */
4437 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4438 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4439 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4440 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4442 val = bp->mac_addr[0] +
4443 (bp->mac_addr[1] << 8) +
4444 (bp->mac_addr[2] << 16) +
4445 bp->mac_addr[3] +
4446 (bp->mac_addr[4] << 8) +
4447 (bp->mac_addr[5] << 16);
4448 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4450 /* Program the MTU. Also include 4 bytes for CRC32. */
4451 val = bp->dev->mtu + ETH_HLEN + 4;
4452 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4453 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4454 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4456 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4457 bp->bnx2_napi[i].last_status_idx = 0;
4459 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4461 /* Set up how to generate a link change interrupt. */
4462 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4464 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4465 (u64) bp->status_blk_mapping & 0xffffffff);
4466 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4468 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4469 (u64) bp->stats_blk_mapping & 0xffffffff);
4470 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4471 (u64) bp->stats_blk_mapping >> 32);
4473 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4474 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4476 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4477 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4479 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4480 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4482 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4484 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4486 REG_WR(bp, BNX2_HC_COM_TICKS,
4487 (bp->com_ticks_int << 16) | bp->com_ticks);
4489 REG_WR(bp, BNX2_HC_CMD_TICKS,
4490 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4492 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4493 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4495 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4496 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4498 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4499 val = BNX2_HC_CONFIG_COLLECT_STATS;
4501 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4502 BNX2_HC_CONFIG_COLLECT_STATS;
4505 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4506 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4507 BNX2_HC_SB_CONFIG_1;
4509 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4510 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4513 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4514 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4516 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4517 (bp->tx_quick_cons_trip_int << 16) |
4518 bp->tx_quick_cons_trip);
4520 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4521 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4523 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4526 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4527 val |= BNX2_HC_CONFIG_ONE_SHOT;
4529 REG_WR(bp, BNX2_HC_CONFIG, val);
4531 /* Clear internal stats counters. */
4532 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4534 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4536 /* Initialize the receive filter. */
4537 bnx2_set_rx_mode(bp->dev);
4539 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4540 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4541 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4542 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4544 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4547 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4548 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4552 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4558 bnx2_clear_ring_states(struct bnx2 *bp)
4560 struct bnx2_napi *bnapi;
4563 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4564 bnapi = &bp->bnx2_napi[i];
4567 bnapi->hw_tx_cons = 0;
4568 bnapi->rx_prod_bseq = 0;
4571 bnapi->rx_pg_prod = 0;
4572 bnapi->rx_pg_cons = 0;
4577 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4579 u32 val, offset0, offset1, offset2, offset3;
4580 u32 cid_addr = GET_CID_ADDR(cid);
4582 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4583 offset0 = BNX2_L2CTX_TYPE_XI;
4584 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4585 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4586 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4588 offset0 = BNX2_L2CTX_TYPE;
4589 offset1 = BNX2_L2CTX_CMD_TYPE;
4590 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4591 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4593 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4594 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4596 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4597 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4599 val = (u64) bp->tx_desc_mapping >> 32;
4600 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4602 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4603 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4607 bnx2_init_tx_ring(struct bnx2 *bp)
4611 struct bnx2_napi *bnapi;
4614 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4616 bp->tx_vec = BNX2_TX_VEC;
4617 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4620 bnapi = &bp->bnx2_napi[bp->tx_vec];
4622 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4624 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4626 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4627 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4630 bp->tx_prod_bseq = 0;
4632 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4633 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4635 bnx2_init_tx_context(bp, cid);
4639 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4645 for (i = 0; i < num_rings; i++) {
4648 rxbd = &rx_ring[i][0];
4649 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4650 rxbd->rx_bd_len = buf_size;
4651 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4653 if (i == (num_rings - 1))
4654 j = 0;
4655 else
4656 j = i + 1;
4657 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4658 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4663 bnx2_init_rx_ring(struct bnx2 *bp)
4666 u16 prod, ring_prod;
4667 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4668 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4670 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4671 bp->rx_buf_use_size, bp->rx_max_ring);
4673 bnx2_init_rx_context0(bp);
4675 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4676 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4677 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4680 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4681 if (bp->rx_pg_ring_size) {
4682 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4683 bp->rx_pg_desc_mapping,
4684 PAGE_SIZE, bp->rx_max_pg_ring);
4685 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4686 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4687 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4688 BNX2_L2CTX_RBDC_JUMBO_KEY);
4690 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4691 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4693 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4694 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4696 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4697 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4700 val = (u64) bp->rx_desc_mapping[0] >> 32;
4701 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4703 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4704 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4706 ring_prod = prod = bnapi->rx_pg_prod;
4707 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4708 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4710 prod = NEXT_RX_BD(prod);
4711 ring_prod = RX_PG_RING_IDX(prod);
4713 bnapi->rx_pg_prod = prod;
4715 ring_prod = prod = bnapi->rx_prod;
4716 for (i = 0; i < bp->rx_ring_size; i++) {
4717 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4720 prod = NEXT_RX_BD(prod);
4721 ring_prod = RX_RING_IDX(prod);
4723 bnapi->rx_prod = prod;
4725 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4727 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4729 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4732 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4734 u32 max, num_rings = 1;
4736 while (ring_size > MAX_RX_DESC_CNT) {
4737 ring_size -= MAX_RX_DESC_CNT;
4738 num_rings++;
4740 /* round to next power of 2 */
4741 max = max_size;
4742 while ((max & num_rings) == 0)
4745 if (num_rings != max)
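/* Worked example, assuming MAX_RX_DESC_CNT == 255 and a max_size of 4:
 * a requested ring_size of 600 leaves num_rings == 3 after the loop
 * above.  The power-of-two max_size is shifted down until it overlaps
 * a set bit of num_rings (4 -> 2) and doubled again when the two still
 * differ, so the function returns 4, the next power of two that holds
 * three pages.
 */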
4752 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4754 u32 rx_size, rx_space, jumbo_size;
4756 /* 8 bytes for the CRC and VLAN tag */
4757 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4759 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4760 sizeof(struct skb_shared_info);
4762 bp->rx_copy_thresh = RX_COPY_THRESH;
4763 bp->rx_pg_ring_size = 0;
4764 bp->rx_max_pg_ring = 0;
4765 bp->rx_max_pg_ring_idx = 0;
4766 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4767 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4769 jumbo_size = size * pages;
4770 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4771 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4773 bp->rx_pg_ring_size = jumbo_size;
4774 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4776 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4777 rx_size = RX_COPY_THRESH + bp->rx_offset;
4778 bp->rx_copy_thresh = 0;
4781 bp->rx_buf_use_size = rx_size;
4783 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4784 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4785 bp->rx_ring_size = size;
4786 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4787 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
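/* Worked sketch of the sizing above for a standard 1500-byte MTU
 * (figures vary with NET_SKB_PAD and the per-arch skb_shared_info
 * size): rx_size = 1500 + ETH_HLEN + rx_offset + 8, and rx_space adds
 * alignment slack plus the shared-info overhead.  Only when rx_space
 * exceeds PAGE_SIZE does the driver fall back to the split-header
 * scheme, capping the linear buffer at RX_COPY_THRESH + rx_offset and
 * placing the rest of each frame on the page ring.
 */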
4791 bnx2_free_tx_skbs(struct bnx2 *bp)
4795 if (bp->tx_buf_ring == NULL)
4798 for (i = 0; i < TX_DESC_CNT; ) {
4799 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4800 struct sk_buff *skb = tx_buf->skb;
4808 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4809 skb_headlen(skb), PCI_DMA_TODEVICE);
4813 last = skb_shinfo(skb)->nr_frags;
4814 for (j = 0; j < last; j++) {
4815 tx_buf = &bp->tx_buf_ring[i + j + 1];
4816 pci_unmap_page(bp->pdev,
4817 pci_unmap_addr(tx_buf, mapping),
4818 skb_shinfo(skb)->frags[j].size,
4828 bnx2_free_rx_skbs(struct bnx2 *bp)
4832 if (bp->rx_buf_ring == NULL)
4835 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4836 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4837 struct sk_buff *skb = rx_buf->skb;
4842 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4843 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4849 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4850 bnx2_free_rx_page(bp, i);
4854 bnx2_free_skbs(struct bnx2 *bp)
4856 bnx2_free_tx_skbs(bp);
4857 bnx2_free_rx_skbs(bp);
4861 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4865 rc = bnx2_reset_chip(bp, reset_code);
4870 if ((rc = bnx2_init_chip(bp)) != 0)
4873 bnx2_clear_ring_states(bp);
4874 bnx2_init_tx_ring(bp);
4875 bnx2_init_rx_ring(bp);
4880 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4884 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4887 spin_lock_bh(&bp->phy_lock);
4888 bnx2_init_phy(bp, reset_phy);
4890 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4891 bnx2_remote_phy_event(bp);
4892 spin_unlock_bh(&bp->phy_lock);
4897 bnx2_test_registers(struct bnx2 *bp)
4901 static const struct {
4904 #define BNX2_FL_NOT_5709 1
4908 { 0x006c, 0, 0x00000000, 0x0000003f },
4909 { 0x0090, 0, 0xffffffff, 0x00000000 },
4910 { 0x0094, 0, 0x00000000, 0x00000000 },
4912 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4913 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4914 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4915 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4916 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4917 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4918 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4919 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4920 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4922 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4923 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4924 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4925 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4926 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4927 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4929 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4930 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4931 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4933 { 0x1000, 0, 0x00000000, 0x00000001 },
4934 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
4936 { 0x1408, 0, 0x01c00800, 0x00000000 },
4937 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4938 { 0x14a8, 0, 0x00000000, 0x000001ff },
4939 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4940 { 0x14b0, 0, 0x00000002, 0x00000001 },
4941 { 0x14b8, 0, 0x00000000, 0x00000000 },
4942 { 0x14c0, 0, 0x00000000, 0x00000009 },
4943 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4944 { 0x14cc, 0, 0x00000000, 0x00000001 },
4945 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4947 { 0x1800, 0, 0x00000000, 0x00000001 },
4948 { 0x1804, 0, 0x00000000, 0x00000003 },
4950 { 0x2800, 0, 0x00000000, 0x00000001 },
4951 { 0x2804, 0, 0x00000000, 0x00003f01 },
4952 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4953 { 0x2810, 0, 0xffff0000, 0x00000000 },
4954 { 0x2814, 0, 0xffff0000, 0x00000000 },
4955 { 0x2818, 0, 0xffff0000, 0x00000000 },
4956 { 0x281c, 0, 0xffff0000, 0x00000000 },
4957 { 0x2834, 0, 0xffffffff, 0x00000000 },
4958 { 0x2840, 0, 0x00000000, 0xffffffff },
4959 { 0x2844, 0, 0x00000000, 0xffffffff },
4960 { 0x2848, 0, 0xffffffff, 0x00000000 },
4961 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4963 { 0x2c00, 0, 0x00000000, 0x00000011 },
4964 { 0x2c04, 0, 0x00000000, 0x00030007 },
4966 { 0x3c00, 0, 0x00000000, 0x00000001 },
4967 { 0x3c04, 0, 0x00000000, 0x00070000 },
4968 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4969 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4970 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4971 { 0x3c14, 0, 0x00000000, 0xffffffff },
4972 { 0x3c18, 0, 0x00000000, 0xffffffff },
4973 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4974 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4976 { 0x5004, 0, 0x00000000, 0x0000007f },
4977 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4979 { 0x5c00, 0, 0x00000000, 0x00000001 },
4980 { 0x5c04, 0, 0x00000000, 0x0003000f },
4981 { 0x5c08, 0, 0x00000003, 0x00000000 },
4982 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4983 { 0x5c10, 0, 0x00000000, 0xffffffff },
4984 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4985 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4986 { 0x5c88, 0, 0x00000000, 0x00077373 },
4987 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4989 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4990 { 0x680c, 0, 0xffffffff, 0x00000000 },
4991 { 0x6810, 0, 0xffffffff, 0x00000000 },
4992 { 0x6814, 0, 0xffffffff, 0x00000000 },
4993 { 0x6818, 0, 0xffffffff, 0x00000000 },
4994 { 0x681c, 0, 0xffffffff, 0x00000000 },
4995 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4996 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4997 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4998 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4999 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5000 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5001 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5002 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5003 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5004 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5005 { 0x684c, 0, 0xffffffff, 0x00000000 },
5006 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5007 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5008 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5009 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5010 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5011 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5013 { 0xffff, 0, 0x00000000, 0x00000000 },
5018 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5019 is_5709 = 1;
5021 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5022 u32 offset, rw_mask, ro_mask, save_val, val;
5023 u16 flags = reg_tbl[i].flags;
5025 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5026 continue;
5028 offset = (u32) reg_tbl[i].offset;
5029 rw_mask = reg_tbl[i].rw_mask;
5030 ro_mask = reg_tbl[i].ro_mask;
5032 save_val = readl(bp->regview + offset);
5034 writel(0, bp->regview + offset);
5036 val = readl(bp->regview + offset);
5037 if ((val & rw_mask) != 0) {
5038 goto reg_test_err;
5041 if ((val & ro_mask) != (save_val & ro_mask)) {
5042 goto reg_test_err;
5045 writel(0xffffffff, bp->regview + offset);
5047 val = readl(bp->regview + offset);
5048 if ((val & rw_mask) != rw_mask) {
5049 goto reg_test_err;
5052 if ((val & ro_mask) != (save_val & ro_mask)) {
5053 goto reg_test_err;
5056 writel(save_val, bp->regview + offset);
5057 continue;
5059 reg_test_err:
5060 writel(save_val, bp->regview + offset);
5068 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5070 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5071 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
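/* Write each pattern to every 32-bit word in the window through
 * the indirect register interface and fail if any word does not
 * read back identically.
 */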
5074 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5077 for (offset = 0; offset < size; offset += 4) {
5079 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5081 if (bnx2_reg_rd_ind(bp, start + offset) !=
5082 test_pattern[i]) {
5083 return -ENODEV;
5087 return 0;
5091 bnx2_test_memory(struct bnx2 *bp)
5095 static struct mem_entry {
5096 u32 offset;
5097 u32 len;
5098 } mem_tbl_5706[] = {
5099 { 0x60000, 0x4000 },
5100 { 0xa0000, 0x3000 },
5101 { 0xe0000, 0x4000 },
5102 { 0x120000, 0x4000 },
5103 { 0x1a0000, 0x4000 },
5104 { 0x160000, 0x4000 },
5105 { 0xffffffff, 0 },
5107 mem_tbl_5709[] = {
5108 { 0x60000, 0x4000 },
5109 { 0xa0000, 0x3000 },
5110 { 0xe0000, 0x4000 },
5111 { 0x120000, 0x4000 },
5112 { 0x1a0000, 0x4000 },
5113 { 0xffffffff, 0 },
5115 struct mem_entry *mem_tbl;
5117 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5118 mem_tbl = mem_tbl_5709;
5120 mem_tbl = mem_tbl_5706;
5122 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5123 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5124 mem_tbl[i].len)) != 0) {
5125 return ret;
5132 #define BNX2_MAC_LOOPBACK 0
5133 #define BNX2_PHY_LOOPBACK 1
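/* Send one frame addressed to our own MAC through the requested
 * loopback path (MAC or PHY), kick the coalescing block so the
 * completions post immediately, then verify that exactly one frame
 * returns on the RX ring with a clean l2_fhdr status and an
 * unmodified payload.
 */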
5136 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5138 unsigned int pkt_size, num_pkts, i;
5139 struct sk_buff *skb, *rx_skb;
5140 unsigned char *packet;
5141 u16 rx_start_idx, rx_idx;
5144 struct sw_bd *rx_buf;
5145 struct l2_fhdr *rx_hdr;
5147 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5150 if (bp->flags & BNX2_FLAG_USING_MSIX)
5151 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5153 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5154 bp->loopback = MAC_LOOPBACK;
5155 bnx2_set_mac_loopback(bp);
5157 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5158 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5159 return 0;
5161 bp->loopback = PHY_LOOPBACK;
5162 bnx2_set_phy_loopback(bp);
5167 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5168 skb = netdev_alloc_skb(bp->dev, pkt_size);
5169 if (!skb)
5170 return -ENOMEM;
5171 packet = skb_put(skb, pkt_size);
5172 memcpy(packet, bp->dev->dev_addr, 6);
5173 memset(packet + 6, 0x0, 8);
5174 for (i = 14; i < pkt_size; i++)
5175 packet[i] = (unsigned char) (i & 0xff);
5177 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5178 PCI_DMA_TODEVICE);
5180 REG_WR(bp, BNX2_HC_COMMAND,
5181 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5183 REG_RD(bp, BNX2_HC_COMMAND);
5186 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5188 num_pkts = 0;
5190 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5192 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5193 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5194 txbd->tx_bd_mss_nbytes = pkt_size;
5195 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5197 num_pkts++;
5198 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5199 bp->tx_prod_bseq += pkt_size;
5201 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5202 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5204 udelay(100);
5206 REG_WR(bp, BNX2_HC_COMMAND,
5207 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5209 REG_RD(bp, BNX2_HC_COMMAND);
5211 udelay(5);
5213 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5216 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5217 goto loopback_test_done;
5219 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5220 if (rx_idx != rx_start_idx + num_pkts) {
5221 goto loopback_test_done;
5224 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5225 rx_skb = rx_buf->skb;
5227 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5228 skb_reserve(rx_skb, bp->rx_offset);
5230 pci_dma_sync_single_for_cpu(bp->pdev,
5231 pci_unmap_addr(rx_buf, mapping),
5232 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5234 if (rx_hdr->l2_fhdr_status &
5235 (L2_FHDR_ERRORS_BAD_CRC |
5236 L2_FHDR_ERRORS_PHY_DECODE |
5237 L2_FHDR_ERRORS_ALIGNMENT |
5238 L2_FHDR_ERRORS_TOO_SHORT |
5239 L2_FHDR_ERRORS_GIANT_FRAME)) {
5241 goto loopback_test_done;
5244 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5245 goto loopback_test_done;
5248 for (i = 14; i < pkt_size; i++) {
5249 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5250 goto loopback_test_done;
5261 #define BNX2_MAC_LOOPBACK_FAILED 1
5262 #define BNX2_PHY_LOOPBACK_FAILED 2
5263 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5264 BNX2_PHY_LOOPBACK_FAILED)
5267 bnx2_test_loopback(struct bnx2 *bp)
5271 if (!netif_running(bp->dev))
5272 return BNX2_LOOPBACK_FAILED;
5274 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5275 spin_lock_bh(&bp->phy_lock);
5276 bnx2_init_phy(bp, 1);
5277 spin_unlock_bh(&bp->phy_lock);
5278 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5279 rc |= BNX2_MAC_LOOPBACK_FAILED;
5280 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5281 rc |= BNX2_PHY_LOOPBACK_FAILED;
5285 #define NVRAM_SIZE 0x200
5286 #define CRC32_RESIDUAL 0xdebb20e3
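/* Each NVRAM region ends with a little-endian CRC32 of the data
 * that precedes it.  Running ether_crc_le() over a block together
 * with its appended CRC yields the fixed CRC32 residual 0xdebb20e3
 * when the block is intact, so the test below can validate each
 * half without extracting the stored checksum.
 */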
5289 bnx2_test_nvram(struct bnx2 *bp)
5291 __be32 buf[NVRAM_SIZE / 4];
5292 u8 *data = (u8 *) buf;
5296 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5297 goto test_nvram_done;
5299 magic = be32_to_cpu(buf[0]);
5300 if (magic != 0x669955aa) {
5301 rc = -ENODEV;
5302 goto test_nvram_done;
5305 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5306 goto test_nvram_done;
5308 csum = ether_crc_le(0x100, data);
5309 if (csum != CRC32_RESIDUAL) {
5310 rc = -ENODEV;
5311 goto test_nvram_done;
5314 csum = ether_crc_le(0x100, data + 0x100);
5315 if (csum != CRC32_RESIDUAL) {
5316 rc = -ENODEV;
5324 bnx2_test_link(struct bnx2 *bp)
5328 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5329 if (bp->link_up)
5330 return 0;
5331 return -ENODEV;
5333 spin_lock_bh(&bp->phy_lock);
5334 bnx2_enable_bmsr1(bp);
5335 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5336 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5337 bnx2_disable_bmsr1(bp);
5338 spin_unlock_bh(&bp->phy_lock);
5340 if (bmsr & BMSR_LSTATUS) {
5341 return 0;
5343 return -ENODEV;
5347 bnx2_test_intr(struct bnx2 *bp)
5352 if (!netif_running(bp->dev))
5353 return -ENODEV;
5355 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5357 /* This register is not touched during run-time. */
5358 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5359 REG_RD(bp, BNX2_HC_COMMAND);
5361 for (i = 0; i < 10; i++) {
5362 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5363 status_idx) {
5365 break;
5368 msleep_interruptible(10);
5376 /* Determine whether a link partner is present, for parallel detection. */
5378 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5380 u32 mode_ctl, an_dbg, exp;
5382 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5383 return 0;
5385 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5386 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5388 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5389 return 0;
5391 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5392 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5393 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5395 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5396 return 0;
5398 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5399 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5400 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5402 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5403 return 0;
5405 return 1;
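/* 5706 SerDes watchdog.  While autonegotiation has not produced a
 * link, look for a non-autonegotiating (parallel detect) partner
 * and, if one is seen, force 1000 Mb/s full duplex.  If a link that
 * was parallel-detected later starts receiving autoneg CONFIG
 * words, switch back to autonegotiation.
 */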
5409 bnx2_5706_serdes_timer(struct bnx2 *bp)
5413 spin_lock(&bp->phy_lock);
5414 if (bp->serdes_an_pending) {
5415 bp->serdes_an_pending--;
5417 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5420 bp->current_interval = bp->timer_interval;
5422 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5424 if (bmcr & BMCR_ANENABLE) {
5425 if (bnx2_5706_serdes_has_link(bp)) {
5426 bmcr &= ~BMCR_ANENABLE;
5427 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5428 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5429 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5433 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5434 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5437 bnx2_write_phy(bp, 0x17, 0x0f01);
5438 bnx2_read_phy(bp, 0x15, &phy2);
5439 if (phy2 & 0x20) {
5440 u32 bmcr;
5442 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5443 bmcr |= BMCR_ANENABLE;
5444 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5446 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5449 bp->current_interval = bp->timer_interval;
5454 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5455 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5456 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5458 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5459 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5460 bnx2_5706s_force_link_dn(bp, 1);
5461 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5464 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5465 bnx2_5706s_force_link_dn(bp, 0);
5467 spin_unlock(&bp->phy_lock);
5471 bnx2_5708_serdes_timer(struct bnx2 *bp)
5473 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5474 return;
5476 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5477 bp->serdes_an_pending = 0;
5481 spin_lock(&bp->phy_lock);
5482 if (bp->serdes_an_pending)
5483 bp->serdes_an_pending--;
5484 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5487 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5488 if (bmcr & BMCR_ANENABLE) {
5489 bnx2_enable_forced_2g5(bp);
5490 bp->current_interval = SERDES_FORCED_TIMEOUT;
5491 } else {
5492 bnx2_disable_forced_2g5(bp);
5493 bp->serdes_an_pending = 2;
5494 bp->current_interval = bp->timer_interval;
5498 bp->current_interval = bp->timer_interval;
5500 spin_unlock(&bp->phy_lock);
5504 bnx2_timer(unsigned long data)
5506 struct bnx2 *bp = (struct bnx2 *) data;
5508 if (!netif_running(bp->dev))
5509 return;
5511 if (atomic_read(&bp->intr_sem) != 0)
5512 goto bnx2_restart_timer;
5514 bnx2_send_heart_beat(bp);
5516 bp->stats_blk->stat_FwRxDrop =
5517 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5519 /* Work around occasionally corrupted counters */
5520 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5521 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5522 BNX2_HC_COMMAND_STATS_NOW);
5524 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5525 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5526 bnx2_5706_serdes_timer(bp);
5527 else
5528 bnx2_5708_serdes_timer(bp);
5532 mod_timer(&bp->timer, jiffies + bp->current_interval);
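/* Request one handler per vector.  IRQF_SHARED is only needed for
 * legacy INTx; MSI and MSI-X vectors are exclusive to this device
 * and are requested without the shared flag.
 */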
5536 bnx2_request_irq(struct bnx2 *bp)
5538 struct net_device *dev = bp->dev;
5539 unsigned long flags;
5540 struct bnx2_irq *irq;
5543 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5544 flags = 0;
5545 else
5546 flags = IRQF_SHARED;
5548 for (i = 0; i < bp->irq_nvecs; i++) {
5549 irq = &bp->irq_tbl[i];
5550 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5560 bnx2_free_irq(struct bnx2 *bp)
5562 struct net_device *dev = bp->dev;
5563 struct bnx2_irq *irq;
5566 for (i = 0; i < bp->irq_nvecs; i++) {
5567 irq = &bp->irq_tbl[i];
5568 if (irq->requested)
5569 free_irq(irq->vector, dev);
5572 if (bp->flags & BNX2_FLAG_USING_MSI)
5573 pci_disable_msi(bp->pdev);
5574 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5575 pci_disable_msix(bp->pdev);
5577 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5581 bnx2_enable_msix(struct bnx2 *bp)
5584 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5586 bnx2_setup_msix_tbl(bp);
5587 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5588 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5589 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5591 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5592 msix_ent[i].entry = i;
5593 msix_ent[i].vector = 0;
5596 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5597 if (rc != 0)
5598 return;
5600 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5601 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5603 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5604 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5605 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5606 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5608 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5609 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5610 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5611 bp->irq_tbl[i].vector = msix_ent[i].vector;
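/* Choose the interrupt mechanism in order of preference: MSI-X
 * where the chip supports it, then MSI (one-shot mode on the 5709),
 * then legacy INTx.  bnx2_open() may still fall back from MSI to
 * INTx at runtime if the MSI self test fails.
 */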
5615 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5617 bp->irq_tbl[0].handler = bnx2_interrupt;
5618 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5620 bp->irq_tbl[0].vector = bp->pdev->irq;
5622 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5623 bnx2_enable_msix(bp);
5625 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5626 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5627 if (pci_enable_msi(bp->pdev) == 0) {
5628 bp->flags |= BNX2_FLAG_USING_MSI;
5629 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5630 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5631 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5633 bp->irq_tbl[0].handler = bnx2_msi;
5635 bp->irq_tbl[0].vector = bp->pdev->irq;
5640 /* Called with rtnl_lock */
5642 bnx2_open(struct net_device *dev)
5644 struct bnx2 *bp = netdev_priv(dev);
5647 netif_carrier_off(dev);
5649 bnx2_set_power_state(bp, PCI_D0);
5650 bnx2_disable_int(bp);
5652 rc = bnx2_alloc_mem(bp);
5653 if (rc)
5654 return rc;
5656 bnx2_setup_int_mode(bp, disable_msi);
5657 bnx2_napi_enable(bp);
5658 rc = bnx2_request_irq(bp);
5660 if (rc) {
5661 bnx2_napi_disable(bp);
5662 bnx2_free_mem(bp);
5663 return rc;
5666 rc = bnx2_init_nic(bp, 1);
5668 if (rc) {
5669 bnx2_napi_disable(bp);
5670 bnx2_free_irq(bp);
5671 bnx2_free_skbs(bp);
5672 bnx2_free_mem(bp);
5673 return rc;
5676 mod_timer(&bp->timer, jiffies + bp->current_interval);
5678 atomic_set(&bp->intr_sem, 0);
5680 bnx2_enable_int(bp);
5682 if (bp->flags & BNX2_FLAG_USING_MSI) {
5683 /* Test MSI to make sure it is working.
5684 * If the MSI test fails, go back to INTx mode.
5686 if (bnx2_test_intr(bp) != 0) {
5687 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5688 " using MSI, switching to INTx mode. Please"
5689 " report this failure to the PCI maintainer"
5690 " and include system chipset information.\n",
5693 bnx2_disable_int(bp);
5694 bnx2_free_irq(bp);
5696 bnx2_setup_int_mode(bp, 1);
5698 rc = bnx2_init_nic(bp, 0);
5701 rc = bnx2_request_irq(bp);
5703 if (rc) {
5704 bnx2_napi_disable(bp);
5705 bnx2_free_skbs(bp);
5706 bnx2_free_mem(bp);
5707 del_timer_sync(&bp->timer);
5708 return rc;
5710 bnx2_enable_int(bp);
5713 if (bp->flags & BNX2_FLAG_USING_MSI)
5714 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5715 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5716 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5718 netif_start_queue(dev);
5720 return 0;
5724 bnx2_reset_task(struct work_struct *work)
5726 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5728 if (!netif_running(bp->dev))
5729 return;
5731 bp->in_reset_task = 1;
5732 bnx2_netif_stop(bp);
5734 bnx2_init_nic(bp, 1);
5736 atomic_set(&bp->intr_sem, 1);
5737 bnx2_netif_start(bp);
5738 bp->in_reset_task = 0;
5742 bnx2_tx_timeout(struct net_device *dev)
5744 struct bnx2 *bp = netdev_priv(dev);
5746 /* This allows the netif to be shut down gracefully before resetting */
5747 schedule_work(&bp->reset_task);
5751 /* Called with rtnl_lock */
5753 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5755 struct bnx2 *bp = netdev_priv(dev);
5757 bnx2_netif_stop(bp);
5760 bnx2_set_rx_mode(dev);
5762 bnx2_netif_start(bp);
5766 /* Called with netif_tx_lock.
5767 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5768 * netif_wake_queue().
5771 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5773 struct bnx2 *bp = netdev_priv(dev);
5776 struct sw_bd *tx_buf;
5777 u32 len, vlan_tag_flags, last_frag, mss;
5778 u16 prod, ring_prod;
5780 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5782 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5783 (skb_shinfo(skb)->nr_frags + 1))) {
5784 netif_stop_queue(dev);
5785 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5788 return NETDEV_TX_BUSY;
5790 len = skb_headlen(skb);
5792 ring_prod = TX_RING_IDX(prod);
5795 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5796 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5799 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5800 vlan_tag_flags |=
5801 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5803 if ((mss = skb_shinfo(skb)->gso_size)) {
5804 u32 tcp_opt_len, ip_tcp_len;
5807 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5809 tcp_opt_len = tcp_optlen(skb);
5811 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5812 u32 tcp_off = skb_transport_offset(skb) -
5813 sizeof(struct ipv6hdr) - ETH_HLEN;
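/* A nonzero offset (IPv6 extension headers present) does not fit
 * in a single BD field; the hardware expects it split across the
 * TCP6_OFF0/OFF2/OFF4 fields written below.
 */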
5815 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5816 TX_BD_FLAGS_SW_FLAGS;
5817 if (likely(tcp_off == 0))
5818 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5821 vlan_tag_flags |= ((tcp_off & 0x3) <<
5822 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5823 ((tcp_off & 0x10) <<
5824 TX_BD_FLAGS_TCP6_OFF4_SHL);
5825 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5828 if (skb_header_cloned(skb) &&
5829 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5831 return NETDEV_TX_OK;
5834 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5838 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5839 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5840 iph->daddr, 0,
5841 IPPROTO_TCP, 0);
5843 if (tcp_opt_len || (iph->ihl > 5)) {
5844 vlan_tag_flags |= ((iph->ihl - 5) +
5845 (tcp_opt_len >> 2)) << 8;
5851 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5853 tx_buf = &bp->tx_buf_ring[ring_prod];
5855 pci_unmap_addr_set(tx_buf, mapping, mapping);
5857 txbd = &bp->tx_desc_ring[ring_prod];
5859 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5860 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5861 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5862 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5864 last_frag = skb_shinfo(skb)->nr_frags;
5866 for (i = 0; i < last_frag; i++) {
5867 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5869 prod = NEXT_TX_BD(prod);
5870 ring_prod = TX_RING_IDX(prod);
5871 txbd = &bp->tx_desc_ring[ring_prod];
5874 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5875 len, PCI_DMA_TODEVICE);
5876 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5877 mapping, mapping);
5879 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5880 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5881 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5882 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5885 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5887 prod = NEXT_TX_BD(prod);
5888 bp->tx_prod_bseq += skb->len;
5890 REG_WR16(bp, bp->tx_bidx_addr, prod);
5891 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5896 dev->trans_start = jiffies;
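/* Stop the queue when the ring cannot take a worst-case packet,
 * then re-check tx_avail: bnx2_tx_int() may have freed descriptors
 * in the meantime, and without the re-test the queue could stall
 * with no TX completion left to wake it.
 */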
5898 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5899 netif_stop_queue(dev);
5900 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5901 netif_wake_queue(dev);
5904 return NETDEV_TX_OK;
5907 /* Called with rtnl_lock */
5909 bnx2_close(struct net_device *dev)
5911 struct bnx2 *bp = netdev_priv(dev);
5914 /* Calling flush_scheduled_work() may deadlock because
5915 * linkwatch_event() may be on the workqueue and it will try to get
5916 * the rtnl_lock which we are holding.
5918 while (bp->in_reset_task)
5919 msleep(1);
5921 bnx2_disable_int_sync(bp);
5922 bnx2_napi_disable(bp);
5923 del_timer_sync(&bp->timer);
5924 if (bp->flags & BNX2_FLAG_NO_WOL)
5925 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5926 else if (bp->wol)
5927 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5928 else
5929 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5930 bnx2_reset_chip(bp, reset_code);
5935 netif_carrier_off(bp->dev);
5936 bnx2_set_power_state(bp, PCI_D3hot);
5940 #define GET_NET_STATS64(ctr) \
5941 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5942 (unsigned long) (ctr##_lo)
5944 #define GET_NET_STATS32(ctr) \
5945 (ctr##_lo)
5947 #if (BITS_PER_LONG == 64)
5948 #define GET_NET_STATS GET_NET_STATS64
5949 #else
5950 #define GET_NET_STATS GET_NET_STATS32
5951 #endif
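/* The chip keeps 64-bit counters as hi/lo 32-bit pairs.  On 64-bit
 * hosts both halves are combined; on 32-bit hosts only the low word
 * is reported, since struct net_device_stats uses unsigned long.
 */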
5953 static struct net_device_stats *
5954 bnx2_get_stats(struct net_device *dev)
5956 struct bnx2 *bp = netdev_priv(dev);
5957 struct statistics_block *stats_blk = bp->stats_blk;
5958 struct net_device_stats *net_stats = &bp->net_stats;
5960 if (bp->stats_blk == NULL) {
5961 return net_stats;
5963 net_stats->rx_packets =
5964 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5965 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5966 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5968 net_stats->tx_packets =
5969 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5970 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5971 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5973 net_stats->rx_bytes =
5974 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5976 net_stats->tx_bytes =
5977 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5979 net_stats->multicast =
5980 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5982 net_stats->collisions =
5983 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5985 net_stats->rx_length_errors =
5986 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5987 stats_blk->stat_EtherStatsOverrsizePkts);
5989 net_stats->rx_over_errors =
5990 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5992 net_stats->rx_frame_errors =
5993 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5995 net_stats->rx_crc_errors =
5996 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5998 net_stats->rx_errors = net_stats->rx_length_errors +
5999 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6000 net_stats->rx_crc_errors;
6002 net_stats->tx_aborted_errors =
6003 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6004 stats_blk->stat_Dot3StatsLateCollisions);
6006 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6007 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6008 net_stats->tx_carrier_errors = 0;
6010 net_stats->tx_carrier_errors =
6012 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6015 net_stats->tx_errors =
6016 (unsigned long)
6017 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6018 +
6019 net_stats->tx_aborted_errors +
6020 net_stats->tx_carrier_errors;
6022 net_stats->rx_missed_errors =
6023 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6024 stats_blk->stat_FwRxDrop);
6029 /* All ethtool functions called with rtnl_lock */
6032 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6034 struct bnx2 *bp = netdev_priv(dev);
6035 int support_serdes = 0, support_copper = 0;
6037 cmd->supported = SUPPORTED_Autoneg;
6038 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6039 support_serdes = 1;
6040 support_copper = 1;
6041 } else if (bp->phy_port == PORT_FIBRE)
6042 support_serdes = 1;
6043 else
6044 support_copper = 1;
6046 if (support_serdes) {
6047 cmd->supported |= SUPPORTED_1000baseT_Full |
6048 SUPPORTED_FIBRE;
6049 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6050 cmd->supported |= SUPPORTED_2500baseX_Full;
6053 if (support_copper) {
6054 cmd->supported |= SUPPORTED_10baseT_Half |
6055 SUPPORTED_10baseT_Full |
6056 SUPPORTED_100baseT_Half |
6057 SUPPORTED_100baseT_Full |
6058 SUPPORTED_1000baseT_Full |
6059 SUPPORTED_TP;
6063 spin_lock_bh(&bp->phy_lock);
6064 cmd->port = bp->phy_port;
6065 cmd->advertising = bp->advertising;
6067 if (bp->autoneg & AUTONEG_SPEED) {
6068 cmd->autoneg = AUTONEG_ENABLE;
6070 else {
6071 cmd->autoneg = AUTONEG_DISABLE;
6074 if (netif_carrier_ok(dev)) {
6075 cmd->speed = bp->line_speed;
6076 cmd->duplex = bp->duplex;
6082 spin_unlock_bh(&bp->phy_lock);
6084 cmd->transceiver = XCVR_INTERNAL;
6085 cmd->phy_address = bp->phy_addr;
6091 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6093 struct bnx2 *bp = netdev_priv(dev);
6094 u8 autoneg = bp->autoneg;
6095 u8 req_duplex = bp->req_duplex;
6096 u16 req_line_speed = bp->req_line_speed;
6097 u32 advertising = bp->advertising;
6100 spin_lock_bh(&bp->phy_lock);
6102 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6103 goto err_out_unlock;
6105 if (cmd->port != bp->phy_port &&
6106 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6107 goto err_out_unlock;
6109 if (cmd->autoneg == AUTONEG_ENABLE) {
6110 autoneg |= AUTONEG_SPEED;
6112 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6114 /* allow advertising 1 speed */
6115 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6116 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6117 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6118 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6120 if (cmd->port == PORT_FIBRE)
6121 goto err_out_unlock;
6123 advertising = cmd->advertising;
6125 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6126 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6127 (cmd->port == PORT_TP))
6128 goto err_out_unlock;
6129 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6130 advertising = cmd->advertising;
6131 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6132 goto err_out_unlock;
6134 if (cmd->port == PORT_FIBRE)
6135 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6136 else
6137 advertising = ETHTOOL_ALL_COPPER_SPEED;
6139 advertising |= ADVERTISED_Autoneg;
6142 if (cmd->port == PORT_FIBRE) {
6143 if ((cmd->speed != SPEED_1000 &&
6144 cmd->speed != SPEED_2500) ||
6145 (cmd->duplex != DUPLEX_FULL))
6146 goto err_out_unlock;
6148 if (cmd->speed == SPEED_2500 &&
6149 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6150 goto err_out_unlock;
6152 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6153 goto err_out_unlock;
6155 autoneg &= ~AUTONEG_SPEED;
6156 req_line_speed = cmd->speed;
6157 req_duplex = cmd->duplex;
6161 bp->autoneg = autoneg;
6162 bp->advertising = advertising;
6163 bp->req_line_speed = req_line_speed;
6164 bp->req_duplex = req_duplex;
6166 err = bnx2_setup_phy(bp, cmd->port);
6169 spin_unlock_bh(&bp->phy_lock);
6175 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6177 struct bnx2 *bp = netdev_priv(dev);
6179 strcpy(info->driver, DRV_MODULE_NAME);
6180 strcpy(info->version, DRV_MODULE_VERSION);
6181 strcpy(info->bus_info, pci_name(bp->pdev));
6182 strcpy(info->fw_version, bp->fw_version);
6185 #define BNX2_REGDUMP_LEN (32 * 1024)
6188 bnx2_get_regs_len(struct net_device *dev)
6190 return BNX2_REGDUMP_LEN;
6194 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6196 u32 *p = _p, i, offset;
6198 struct bnx2 *bp = netdev_priv(dev);
6199 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6200 0x0800, 0x0880, 0x0c00, 0x0c10,
6201 0x0c30, 0x0d08, 0x1000, 0x101c,
6202 0x1040, 0x1048, 0x1080, 0x10a4,
6203 0x1400, 0x1490, 0x1498, 0x14f0,
6204 0x1500, 0x155c, 0x1580, 0x15dc,
6205 0x1600, 0x1658, 0x1680, 0x16d8,
6206 0x1800, 0x1820, 0x1840, 0x1854,
6207 0x1880, 0x1894, 0x1900, 0x1984,
6208 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6209 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6210 0x2000, 0x2030, 0x23c0, 0x2400,
6211 0x2800, 0x2820, 0x2830, 0x2850,
6212 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6213 0x3c00, 0x3c94, 0x4000, 0x4010,
6214 0x4080, 0x4090, 0x43c0, 0x4458,
6215 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6216 0x4fc0, 0x5010, 0x53c0, 0x5444,
6217 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6218 0x5fc0, 0x6000, 0x6400, 0x6428,
6219 0x6800, 0x6848, 0x684c, 0x6860,
6220 0x6888, 0x6910, 0x8000 };
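/* reg_boundaries[] lists (start, end) pairs of readable register
 * windows.  The loop below copies each window and, on reaching a
 * window's end, jumps to the next start, repositioning the output
 * pointer so every value stays at its natural offset in the dump.
 */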
6224 memset(p, 0, BNX2_REGDUMP_LEN);
6226 if (!netif_running(bp->dev))
6227 return;
6230 offset = reg_boundaries[0];
6232 while (offset < BNX2_REGDUMP_LEN) {
6233 *p++ = REG_RD(bp, offset);
6235 if (offset == reg_boundaries[i + 1]) {
6236 offset = reg_boundaries[i + 2];
6237 p = (u32 *) (orig_p + offset);
6244 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6246 struct bnx2 *bp = netdev_priv(dev);
6248 if (bp->flags & BNX2_FLAG_NO_WOL) {
6249 wol->supported = 0;
6250 wol->wolopts = 0;
6252 else {
6253 wol->supported = WAKE_MAGIC;
6254 if (bp->wol)
6255 wol->wolopts = WAKE_MAGIC;
6256 else
6257 wol->wolopts = 0;
6259 memset(&wol->sopass, 0, sizeof(wol->sopass));
6263 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6265 struct bnx2 *bp = netdev_priv(dev);
6267 if (wol->wolopts & ~WAKE_MAGIC)
6268 return -EINVAL;
6270 if (wol->wolopts & WAKE_MAGIC) {
6271 if (bp->flags & BNX2_FLAG_NO_WOL)
6272 return -EINVAL;
6274 bp->wol = 1;
6276 else {
6277 bp->wol = 0;
6279 return 0;
6283 bnx2_nway_reset(struct net_device *dev)
6285 struct bnx2 *bp = netdev_priv(dev);
6288 if (!(bp->autoneg & AUTONEG_SPEED)) {
6289 return -EINVAL;
6292 spin_lock_bh(&bp->phy_lock);
6294 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6297 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6298 spin_unlock_bh(&bp->phy_lock);
6302 /* Force a link down visible on the other side */
6303 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6304 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6305 spin_unlock_bh(&bp->phy_lock);
6307 msleep(20);
6309 spin_lock_bh(&bp->phy_lock);
6311 bp->current_interval = SERDES_AN_TIMEOUT;
6312 bp->serdes_an_pending = 1;
6313 mod_timer(&bp->timer, jiffies + bp->current_interval);
6316 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6317 bmcr &= ~BMCR_LOOPBACK;
6318 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6320 spin_unlock_bh(&bp->phy_lock);
6322 return 0;
6326 bnx2_get_eeprom_len(struct net_device *dev)
6328 struct bnx2 *bp = netdev_priv(dev);
6330 if (bp->flash_info == NULL)
6331 return 0;
6333 return (int) bp->flash_size;
6337 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6340 struct bnx2 *bp = netdev_priv(dev);
6343 /* parameters already validated in ethtool_get_eeprom */
6345 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6347 return rc;
6351 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6354 struct bnx2 *bp = netdev_priv(dev);
6357 /* parameters already validated in ethtool_set_eeprom */
6359 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6361 return rc;
6365 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6367 struct bnx2 *bp = netdev_priv(dev);
6369 memset(coal, 0, sizeof(struct ethtool_coalesce));
6371 coal->rx_coalesce_usecs = bp->rx_ticks;
6372 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6373 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6374 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6376 coal->tx_coalesce_usecs = bp->tx_ticks;
6377 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6378 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6379 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6381 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6383 return 0;
6387 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6389 struct bnx2 *bp = netdev_priv(dev);
6391 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6392 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6394 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6395 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6397 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6398 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6400 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6401 if (bp->rx_quick_cons_trip_int > 0xff)
6402 bp->rx_quick_cons_trip_int = 0xff;
6404 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6405 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6407 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6408 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6410 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6411 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6413 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6414 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6415 0xff;
6417 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6418 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6419 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6420 bp->stats_ticks = USEC_PER_SEC;
6422 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6423 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6424 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6426 if (netif_running(bp->dev)) {
6427 bnx2_netif_stop(bp);
6428 bnx2_init_nic(bp, 0);
6429 bnx2_netif_start(bp);
6432 return 0;
6436 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6438 struct bnx2 *bp = netdev_priv(dev);
6440 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6441 ering->rx_mini_max_pending = 0;
6442 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6444 ering->rx_pending = bp->rx_ring_size;
6445 ering->rx_mini_pending = 0;
6446 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6448 ering->tx_max_pending = MAX_TX_DESC_CNT;
6449 ering->tx_pending = bp->tx_ring_size;
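/* Resizing the rings needs a full reinit: stop traffic, reset the
 * chip, release the old ring memory, recompute the ring geometry,
 * then reallocate and restart if the device was running.
 */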
6453 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6455 if (netif_running(bp->dev)) {
6456 bnx2_netif_stop(bp);
6457 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6458 bnx2_free_skbs(bp);
6459 bnx2_free_mem(bp);
6462 bnx2_set_rx_ring_size(bp, rx);
6463 bp->tx_ring_size = tx;
6465 if (netif_running(bp->dev)) {
6468 rc = bnx2_alloc_mem(bp);
6469 if (rc)
6470 return rc;
6471 bnx2_init_nic(bp, 0);
6472 bnx2_netif_start(bp);
6478 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6480 struct bnx2 *bp = netdev_priv(dev);
6483 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6484 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6485 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6486 return -EINVAL;
6489 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6491 return rc;
6494 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6496 struct bnx2 *bp = netdev_priv(dev);
6498 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6499 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6500 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6504 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6506 struct bnx2 *bp = netdev_priv(dev);
6508 bp->req_flow_ctrl = 0;
6509 if (epause->rx_pause)
6510 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6511 if (epause->tx_pause)
6512 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6514 if (epause->autoneg) {
6515 bp->autoneg |= AUTONEG_FLOW_CTRL;
6517 else {
6518 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6521 spin_lock_bh(&bp->phy_lock);
6523 bnx2_setup_phy(bp, bp->phy_port);
6525 spin_unlock_bh(&bp->phy_lock);
6531 bnx2_get_rx_csum(struct net_device *dev)
6533 struct bnx2 *bp = netdev_priv(dev);
6535 return bp->rx_csum;
6539 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6541 struct bnx2 *bp = netdev_priv(dev);
6543 bp->rx_csum = data;
6544 return 0;
6548 bnx2_set_tso(struct net_device *dev, u32 data)
6550 struct bnx2 *bp = netdev_priv(dev);
6552 if (data) {
6553 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6554 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6555 dev->features |= NETIF_F_TSO6;
6556 } else
6557 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6558 NETIF_F_TSO_ECN);
6559 return 0;
6562 #define BNX2_NUM_STATS 46
6565 char string[ETH_GSTRING_LEN];
6566 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6568 { "rx_error_bytes" },
6570 { "tx_error_bytes" },
6571 { "rx_ucast_packets" },
6572 { "rx_mcast_packets" },
6573 { "rx_bcast_packets" },
6574 { "tx_ucast_packets" },
6575 { "tx_mcast_packets" },
6576 { "tx_bcast_packets" },
6577 { "tx_mac_errors" },
6578 { "tx_carrier_errors" },
6579 { "rx_crc_errors" },
6580 { "rx_align_errors" },
6581 { "tx_single_collisions" },
6582 { "tx_multi_collisions" },
6584 { "tx_excess_collisions" },
6585 { "tx_late_collisions" },
6586 { "tx_total_collisions" },
6589 { "rx_undersize_packets" },
6590 { "rx_oversize_packets" },
6591 { "rx_64_byte_packets" },
6592 { "rx_65_to_127_byte_packets" },
6593 { "rx_128_to_255_byte_packets" },
6594 { "rx_256_to_511_byte_packets" },
6595 { "rx_512_to_1023_byte_packets" },
6596 { "rx_1024_to_1522_byte_packets" },
6597 { "rx_1523_to_9022_byte_packets" },
6598 { "tx_64_byte_packets" },
6599 { "tx_65_to_127_byte_packets" },
6600 { "tx_128_to_255_byte_packets" },
6601 { "tx_256_to_511_byte_packets" },
6602 { "tx_512_to_1023_byte_packets" },
6603 { "tx_1024_to_1522_byte_packets" },
6604 { "tx_1523_to_9022_byte_packets" },
6605 { "rx_xon_frames" },
6606 { "rx_xoff_frames" },
6607 { "tx_xon_frames" },
6608 { "tx_xoff_frames" },
6609 { "rx_mac_ctrl_frames" },
6610 { "rx_filtered_packets" },
6612 { "rx_fw_discards" },
6615 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
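/* The statistics block is indexed as an array of 32-bit words, so
 * each field's byte offset is divided by 4.  64-bit counters store
 * the high word first; bnx2_get_ethtool_stats() reassembles them.
 */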
6617 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6618 STATS_OFFSET32(stat_IfHCInOctets_hi),
6619 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6620 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6621 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6622 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6623 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6624 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6625 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6626 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6627 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6628 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6629 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6630 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6631 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6632 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6633 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6634 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6635 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6636 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6637 STATS_OFFSET32(stat_EtherStatsCollisions),
6638 STATS_OFFSET32(stat_EtherStatsFragments),
6639 STATS_OFFSET32(stat_EtherStatsJabbers),
6640 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6641 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6642 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6643 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6644 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6645 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6646 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6647 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6648 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6649 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6650 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6651 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6652 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6653 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6654 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6655 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6656 STATS_OFFSET32(stat_XonPauseFramesReceived),
6657 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6658 STATS_OFFSET32(stat_OutXonSent),
6659 STATS_OFFSET32(stat_OutXoffSent),
6660 STATS_OFFSET32(stat_MacControlFramesReceived),
6661 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6662 STATS_OFFSET32(stat_IfInMBUFDiscards),
6663 STATS_OFFSET32(stat_FwRxDrop),
6666 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6667 * skipped because of errata.
6669 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6670 8,0,8,8,8,8,8,8,8,8,
6671 4,0,4,4,4,4,4,4,4,4,
6672 4,4,4,4,4,4,4,4,4,4,
6673 4,4,4,4,4,4,4,4,4,4,
6677 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6678 8,0,8,8,8,8,8,8,8,8,
6679 4,4,4,4,4,4,4,4,4,4,
6680 4,4,4,4,4,4,4,4,4,4,
6681 4,4,4,4,4,4,4,4,4,4,
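/* The tables above give the width of each counter: 8 for a 64-bit
 * hi/lo pair, 4 for a single 32-bit word, 0 for a counter that is
 * reported as zero because of the errata noted above.
 */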
6685 #define BNX2_NUM_TESTS 6
6688 char string[ETH_GSTRING_LEN];
6689 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6690 { "register_test (offline)" },
6691 { "memory_test (offline)" },
6692 { "loopback_test (offline)" },
6693 { "nvram_test (online)" },
6694 { "interrupt_test (online)" },
6695 { "link_test (online)" },
6699 bnx2_get_sset_count(struct net_device *dev, int sset)
6701 switch (sset) {
6702 case ETH_SS_TEST:
6703 return BNX2_NUM_TESTS;
6704 case ETH_SS_STATS:
6705 return BNX2_NUM_STATS;
6706 default:
6707 return -EOPNOTSUPP;
6712 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6714 struct bnx2 *bp = netdev_priv(dev);
6716 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6717 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6720 bnx2_netif_stop(bp);
6721 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6724 if (bnx2_test_registers(bp) != 0) {
6725 buf[0] = 1;
6726 etest->flags |= ETH_TEST_FL_FAILED;
6728 if (bnx2_test_memory(bp) != 0) {
6729 buf[1] = 1;
6730 etest->flags |= ETH_TEST_FL_FAILED;
6732 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6733 etest->flags |= ETH_TEST_FL_FAILED;
6735 if (!netif_running(bp->dev)) {
6736 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6738 else {
6739 bnx2_init_nic(bp, 1);
6740 bnx2_netif_start(bp);
6743 /* wait for link up */
6744 for (i = 0; i < 7; i++) {
6745 if (bp->link_up)
6746 break;
6747 msleep_interruptible(1000);
6751 if (bnx2_test_nvram(bp) != 0) {
6752 buf[3] = 1;
6753 etest->flags |= ETH_TEST_FL_FAILED;
6755 if (bnx2_test_intr(bp) != 0) {
6756 buf[4] = 1;
6757 etest->flags |= ETH_TEST_FL_FAILED;
6760 if (bnx2_test_link(bp) != 0) {
6761 buf[5] = 1;
6762 etest->flags |= ETH_TEST_FL_FAILED;
6768 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6770 switch (stringset) {
6771 case ETH_SS_STATS:
6772 memcpy(buf, bnx2_stats_str_arr,
6773 sizeof(bnx2_stats_str_arr));
6774 break;
6775 case ETH_SS_TEST:
6776 memcpy(buf, bnx2_tests_str_arr,
6777 sizeof(bnx2_tests_str_arr));
6778 break;
6783 bnx2_get_ethtool_stats(struct net_device *dev,
6784 struct ethtool_stats *stats, u64 *buf)
6786 struct bnx2 *bp = netdev_priv(dev);
6788 u32 *hw_stats = (u32 *) bp->stats_blk;
6789 u8 *stats_len_arr = NULL;
6791 if (hw_stats == NULL) {
6792 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6796 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6797 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6798 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6799 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6800 stats_len_arr = bnx2_5706_stats_len_arr;
6802 stats_len_arr = bnx2_5708_stats_len_arr;
6804 for (i = 0; i < BNX2_NUM_STATS; i++) {
6805 if (stats_len_arr[i] == 0) {
6806 /* skip this counter */
6807 buf[i] = 0;
6808 continue;
6810 if (stats_len_arr[i] == 4) {
6811 /* 4-byte counter */
6812 buf[i] = (u64)
6813 *(hw_stats + bnx2_stats_offset_arr[i]);
6814 continue;
6816 /* 8-byte counter */
6817 buf[i] = (((u64) *(hw_stats +
6818 bnx2_stats_offset_arr[i])) << 32) +
6819 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6824 bnx2_phys_id(struct net_device *dev, u32 data)
6826 struct bnx2 *bp = netdev_priv(dev);
6833 save = REG_RD(bp, BNX2_MISC_CFG);
6834 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6836 for (i = 0; i < (data * 2); i++) {
6837 if ((i % 2) == 0) {
6838 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6840 else {
6841 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6842 BNX2_EMAC_LED_1000MB_OVERRIDE |
6843 BNX2_EMAC_LED_100MB_OVERRIDE |
6844 BNX2_EMAC_LED_10MB_OVERRIDE |
6845 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6846 BNX2_EMAC_LED_TRAFFIC);
6848 msleep_interruptible(500);
6849 if (signal_pending(current))
6850 break;
6852 REG_WR(bp, BNX2_EMAC_LED, 0);
6853 REG_WR(bp, BNX2_MISC_CFG, save);
6858 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6860 struct bnx2 *bp = netdev_priv(dev);
6862 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6863 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6865 return (ethtool_op_set_tx_csum(dev, data));
6868 static const struct ethtool_ops bnx2_ethtool_ops = {
6869 .get_settings = bnx2_get_settings,
6870 .set_settings = bnx2_set_settings,
6871 .get_drvinfo = bnx2_get_drvinfo,
6872 .get_regs_len = bnx2_get_regs_len,
6873 .get_regs = bnx2_get_regs,
6874 .get_wol = bnx2_get_wol,
6875 .set_wol = bnx2_set_wol,
6876 .nway_reset = bnx2_nway_reset,
6877 .get_link = ethtool_op_get_link,
6878 .get_eeprom_len = bnx2_get_eeprom_len,
6879 .get_eeprom = bnx2_get_eeprom,
6880 .set_eeprom = bnx2_set_eeprom,
6881 .get_coalesce = bnx2_get_coalesce,
6882 .set_coalesce = bnx2_set_coalesce,
6883 .get_ringparam = bnx2_get_ringparam,
6884 .set_ringparam = bnx2_set_ringparam,
6885 .get_pauseparam = bnx2_get_pauseparam,
6886 .set_pauseparam = bnx2_set_pauseparam,
6887 .get_rx_csum = bnx2_get_rx_csum,
6888 .set_rx_csum = bnx2_set_rx_csum,
6889 .set_tx_csum = bnx2_set_tx_csum,
6890 .set_sg = ethtool_op_set_sg,
6891 .set_tso = bnx2_set_tso,
6892 .self_test = bnx2_self_test,
6893 .get_strings = bnx2_get_strings,
6894 .phys_id = bnx2_phys_id,
6895 .get_ethtool_stats = bnx2_get_ethtool_stats,
6896 .get_sset_count = bnx2_get_sset_count,
6899 /* Called with rtnl_lock */
6901 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6903 struct mii_ioctl_data *data = if_mii(ifr);
6904 struct bnx2 *bp = netdev_priv(dev);
6907 switch (cmd) {
6908 case SIOCGMIIPHY:
6909 data->phy_id = bp->phy_addr;
6911 /* fallthru */
6912 case SIOCGMIIREG: {
6913 u32 mii_regval;
6915 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6916 return -EOPNOTSUPP;
6918 if (!netif_running(dev))
6919 return -EAGAIN;
6921 spin_lock_bh(&bp->phy_lock);
6922 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6923 spin_unlock_bh(&bp->phy_lock);
6925 data->val_out = mii_regval;
6927 return err;
6930 case SIOCSMIIREG:
6931 if (!capable(CAP_NET_ADMIN))
6934 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6935 return -EOPNOTSUPP;
6937 if (!netif_running(dev))
6938 return -EAGAIN;
6940 spin_lock_bh(&bp->phy_lock);
6941 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6942 spin_unlock_bh(&bp->phy_lock);
6953 /* Called with rtnl_lock */
6955 bnx2_change_mac_addr(struct net_device *dev, void *p)
6957 struct sockaddr *addr = p;
6958 struct bnx2 *bp = netdev_priv(dev);
6960 if (!is_valid_ether_addr(addr->sa_data))
6961 return -EINVAL;
6963 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6964 if (netif_running(dev))
6965 bnx2_set_mac_addr(bp);
6970 /* Called with rtnl_lock */
6972 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6974 struct bnx2 *bp = netdev_priv(dev);
6976 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6977 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6978 return -EINVAL;
6980 dev->mtu = new_mtu;
6981 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6984 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6986 poll_bnx2(struct net_device *dev)
6988 struct bnx2 *bp = netdev_priv(dev);
6990 disable_irq(bp->pdev->irq);
6991 bnx2_interrupt(bp->pdev->irq, dev);
6992 enable_irq(bp->pdev->irq);
6994 #endif
6996 static void __devinit
6997 bnx2_get_5709_media(struct bnx2 *bp)
6999 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7000 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7003 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7005 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7006 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7010 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7011 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7013 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7015 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7020 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7028 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7034 static void __devinit
7035 bnx2_get_pci_speed(struct bnx2 *bp)
7039 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7040 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7043 bp->flags |= BNX2_FLAG_PCIX;
7045 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7047 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7049 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7050 bp->bus_speed_mhz = 133;
7053 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7054 bp->bus_speed_mhz = 100;
7057 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7058 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7059 bp->bus_speed_mhz = 66;
7062 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7063 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7064 bp->bus_speed_mhz = 50;
7067 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7068 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7069 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7070 bp->bus_speed_mhz = 33;
7075 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7076 bp->bus_speed_mhz = 66;
7078 bp->bus_speed_mhz = 33;
7081 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7082 bp->flags |= BNX2_FLAG_PCI_32BIT;
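/* One-time probe setup: enable and map the PCI device, identify
 * the chip and bus, size the DMA mask (40-bit on the 5708), locate
 * the shared firmware memory, read the permanent MAC address and
 * bootcode version, and apply per-chip quirks and default
 * coalescing parameters.
 */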
7086 static int __devinit
7087 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7090 unsigned long mem_len;
7093 u64 dma_mask, persist_dma_mask;
7095 SET_NETDEV_DEV(dev, &pdev->dev);
7096 bp = netdev_priv(dev);
7101 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7102 rc = pci_enable_device(pdev);
7104 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7108 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7110 "Cannot find PCI device base address, aborting.\n");
7112 goto err_out_disable;
7115 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7117 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7118 goto err_out_disable;
7121 pci_set_master(pdev);
7122 pci_save_state(pdev);
7124 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7125 if (bp->pm_cap == 0) {
7127 "Cannot find power management capability, aborting.\n");
7129 goto err_out_release;
7135 spin_lock_init(&bp->phy_lock);
7136 spin_lock_init(&bp->indirect_lock);
7137 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7139 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7140 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7141 dev->mem_end = dev->mem_start + mem_len;
7142 dev->irq = pdev->irq;
7144 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7147 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7149 goto err_out_release;
7152 /* Configure byte swap and enable write to the reg_window registers.
7153 * Rely on the CPU to do target byte swapping on big-endian systems.
7154 * The chip's target access swapping will not swap all accesses.
7156 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7157 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7158 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7160 bnx2_set_power_state(bp, PCI_D0);
7162 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7164 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7165 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7167 "Cannot find PCIE capability, aborting.\n");
7171 bp->flags |= BNX2_FLAG_PCIE;
7172 if (CHIP_REV(bp) == CHIP_REV_Ax)
7173 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7175 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7176 if (bp->pcix_cap == 0) {
7178 "Cannot find PCIX capability, aborting.\n");
7184 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7185 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7186 bp->flags |= BNX2_FLAG_MSIX_CAP;
7189 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7190 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7191 bp->flags |= BNX2_FLAG_MSI_CAP;
7194 /* 5708 cannot support DMA addresses > 40-bit. */
7195 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7196 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7198 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7200 /* Configure DMA attributes. */
7201 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7202 dev->features |= NETIF_F_HIGHDMA;
7203 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7206 "pci_set_consistent_dma_mask failed, aborting.\n");
7209 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7210 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7214 if (!(bp->flags & BNX2_FLAG_PCIE))
7215 bnx2_get_pci_speed(bp);
7217 /* 5706A0 may falsely detect SERR and PERR. */
7218 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7219 reg = REG_RD(bp, PCI_COMMAND);
7220 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7221 REG_WR(bp, PCI_COMMAND, reg);
7223 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7224 !(bp->flags & BNX2_FLAG_PCIX)) {
7227 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7231 bnx2_init_nvram(bp);
7233 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7235 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7236 BNX2_SHM_HDR_SIGNATURE_SIG) {
7237 u32 off = PCI_FUNC(pdev->devfn) << 2;
7239 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7241 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7243 /* Get the permanent MAC address. First we need to make sure the
7244 * firmware is actually running.
7246 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7248 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7249 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7250 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7255 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
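/* The bootcode revision is packed one decimal field per byte in
 * the upper three bytes of this word; the loop renders it as
 * "x.y.z", suppressing leading zeros within each field.
 */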
7256 for (i = 0, j = 0; i < 3; i++) {
7259 num = (u8) (reg >> (24 - (i * 8)));
7260 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7261 if (num >= k || !skip0 || k == 1) {
7262 bp->fw_version[j++] = (num / k) + '0';
7267 bp->fw_version[j++] = '.';
7269 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7270 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7273 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7274 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7276 for (i = 0; i < 30; i++) {
7277 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7278 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7283 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7284 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7285 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7286 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7288 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7290 bp->fw_version[j++] = ' ';
7291 for (i = 0; i < 3; i++) {
7292 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7294 memcpy(&bp->fw_version[j], &reg, 4);
7299 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7300 bp->mac_addr[0] = (u8) (reg >> 8);
7301 bp->mac_addr[1] = (u8) reg;
7303 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7304 bp->mac_addr[2] = (u8) (reg >> 24);
7305 bp->mac_addr[3] = (u8) (reg >> 16);
7306 bp->mac_addr[4] = (u8) (reg >> 8);
7307 bp->mac_addr[5] = (u8) reg;
7309 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7311 bp->tx_ring_size = MAX_TX_DESC_CNT;
7312 bnx2_set_rx_ring_size(bp, 255);
7316 bp->tx_quick_cons_trip_int = 20;
7317 bp->tx_quick_cons_trip = 20;
7318 bp->tx_ticks_int = 80;
7321 bp->rx_quick_cons_trip_int = 6;
7322 bp->rx_quick_cons_trip = 6;
7323 bp->rx_ticks_int = 18;
7326 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7328 bp->timer_interval = HZ;
7329 bp->current_interval = HZ;
7333 /* Disable WOL support if we are running on a SERDES chip. */
7334 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7335 bnx2_get_5709_media(bp);
7336 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7337 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7339 bp->phy_port = PORT_TP;
7340 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7341 bp->phy_port = PORT_FIBRE;
7342 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7343 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7344 bp->flags |= BNX2_FLAG_NO_WOL;
7347 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7348 /* Don't do parallel detect on this board because of
7349 * some board problems. The link will not go down
7350 * if we do parallel detect.
7352 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7353 pdev->subsystem_device == 0x310c)
7354 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7357 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7358 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7360 bnx2_init_remote_phy(bp);
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int = bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
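
	/* The A0 silicon apparently cannot use separate coalescing values in
	 * interrupt context, so the *_int parameters simply mirror the
	 * normal ones here.
	 */
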
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
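
	/* pci_get_device() drops the reference on the device passed in and
	 * takes a reference on the device it returns, so only the early-exit
	 * path above needs an explicit pci_dev_put().
	 */
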
	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
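
/* bnx2_bus_string() below formats the bus type into the caller's buffer and
 * returns it, producing strings such as "PCI Express" or
 * "PCI-X 64-bit 133MHz".
 */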
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		bnapi->bp = bp;
	}
	netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
	netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
		       64);
}
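
/* Both contexts are registered with the conventional NAPI weight of 64;
 * the BNX2_TX_VEC context allows transmit completions to be polled on
 * their own vector when MSI-X is in use.
 */
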
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;
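
	/* Checksum offload, scatter-gather and TSO are advertised on all
	 * chips; the IPv6 variants are only offered on the 5709 family.
	 */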

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
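
/* A successful probe logs a line roughly like:
 * eth0: Broadcom NetXtreme II BCM5708 1000Base-T (B2) PCI-X 64-bit 133MHz
 * found at mem f8000000, IRQ 16, node addr 00:10:18:xx:xx:xx
 */
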
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
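
/* The reset code tells the bootcode why the driver is going quiet: an
 * unload with link down when WOL is unavailable, or a suspend with or
 * without wake-up enabled.
 */
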
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
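
/* Together the three callbacks above implement the PCI error recovery
 * protocol: error_detected() quiesces the device and requests a slot
 * reset, slot_reset() re-enables and reinitializes it, and resume()
 * restarts traffic once the PCI core declares recovery complete.
 */
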
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);