1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.7"
60 #define DRV_MODULE_RELDATE "June 17, 2008"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
93 /* indexed by board_t, above */
96 } board_info[] __devinitdata = {
97 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
98 { "HP NC370T Multifunction Gigabit Server Adapter" },
99 { "HP NC370i Multifunction Gigabit Server Adapter" },
100 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
101 { "HP NC370F Multifunction Gigabit Server Adapter" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
103 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
105 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
106 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
109 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
128 { PCI_VENDOR_ID_BROADCOM, 0x163b,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
133 static struct flash_spec flash_table[] =
135 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
136 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
138 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
139 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
140 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
142 /* Expansion entry 0001 */
143 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
144 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
147 /* Saifun SA25F010 (non-buffered flash) */
148 /* strap, cfg1, & write1 need updates */
149 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
150 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
151 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
152 "Non-buffered flash (128kB)"},
153 /* Saifun SA25F020 (non-buffered flash) */
154 /* strap, cfg1, & write1 need updates */
155 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
156 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
157 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
158 "Non-buffered flash (256kB)"},
159 /* Expansion entry 0100 */
160 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
161 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
164 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
165 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
167 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
168 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
169 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
170 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
172 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
173 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
174 /* Saifun SA25F005 (non-buffered flash) */
175 /* strap, cfg1, & write1 need updates */
176 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
179 "Non-buffered flash (64kB)"},
181 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
182 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
183 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
185 /* Expansion entry 1001 */
186 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 /* Expansion entry 1010 */
191 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 /* ATMEL AT45DB011B (buffered flash) */
196 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
197 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
198 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
199 "Buffered flash (128kB)"},
200 /* Expansion entry 1100 */
201 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 /* Expansion entry 1101 */
206 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 /* Ateml Expansion entry 1110 */
211 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
212 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1110 (Atmel)"},
215 /* ATMEL AT45DB021B (buffered flash) */
216 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
217 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
218 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
219 "Buffered flash (256kB)"},
222 static struct flash_spec flash_5709 = {
223 .flags = BNX2_NV_BUFFERED,
224 .page_bits = BCM5709_FLASH_PAGE_BITS,
225 .page_size = BCM5709_FLASH_PAGE_SIZE,
226 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
227 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
228 .name = "5709 Buffered flash (256kB)",
231 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
233 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
239 /* The ring uses 256 indices for 255 entries, one of them
240 * needs to be skipped.
242 diff = txr->tx_prod - txr->tx_cons;
243 if (unlikely(diff >= TX_DESC_CNT)) {
245 if (diff == TX_DESC_CNT)
246 diff = MAX_TX_DESC_CNT;
248 return (bp->tx_ring_size - diff);
252 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
256 spin_lock_bh(&bp->indirect_lock);
257 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
258 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
259 spin_unlock_bh(&bp->indirect_lock);
264 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
266 spin_lock_bh(&bp->indirect_lock);
267 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
268 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
269 spin_unlock_bh(&bp->indirect_lock);
273 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
275 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
279 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
281 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
285 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
288 spin_lock_bh(&bp->indirect_lock);
289 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
292 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
293 REG_WR(bp, BNX2_CTX_CTX_CTRL,
294 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
295 for (i = 0; i < 5; i++) {
297 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
298 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
303 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
304 REG_WR(bp, BNX2_CTX_DATA, val);
306 spin_unlock_bh(&bp->indirect_lock);
310 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
315 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
316 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
319 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
320 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
325 val1 = (bp->phy_addr << 21) | (reg << 16) |
326 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
327 BNX2_EMAC_MDIO_COMM_START_BUSY;
328 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
330 for (i = 0; i < 50; i++) {
333 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
338 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
344 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
353 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
354 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
355 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
357 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
358 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
367 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
372 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
373 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
377 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
382 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
383 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
384 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
385 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
387 for (i = 0; i < 50; i++) {
390 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
391 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
397 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
402 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
403 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
404 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
406 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
407 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
416 bnx2_disable_int(struct bnx2 *bp)
419 struct bnx2_napi *bnapi;
421 for (i = 0; i < bp->irq_nvecs; i++) {
422 bnapi = &bp->bnx2_napi[i];
423 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
424 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
426 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
430 bnx2_enable_int(struct bnx2 *bp)
433 struct bnx2_napi *bnapi;
435 for (i = 0; i < bp->irq_nvecs; i++) {
436 bnapi = &bp->bnx2_napi[i];
438 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
439 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
440 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
441 bnapi->last_status_idx);
443 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
444 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
445 bnapi->last_status_idx);
447 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
451 bnx2_disable_int_sync(struct bnx2 *bp)
455 atomic_inc(&bp->intr_sem);
456 bnx2_disable_int(bp);
457 for (i = 0; i < bp->irq_nvecs; i++)
458 synchronize_irq(bp->irq_tbl[i].vector);
462 bnx2_napi_disable(struct bnx2 *bp)
466 for (i = 0; i < bp->irq_nvecs; i++)
467 napi_disable(&bp->bnx2_napi[i].napi);
471 bnx2_napi_enable(struct bnx2 *bp)
475 for (i = 0; i < bp->irq_nvecs; i++)
476 napi_enable(&bp->bnx2_napi[i].napi);
480 bnx2_netif_stop(struct bnx2 *bp)
482 bnx2_disable_int_sync(bp);
483 if (netif_running(bp->dev)) {
484 bnx2_napi_disable(bp);
485 netif_tx_disable(bp->dev);
486 bp->dev->trans_start = jiffies; /* prevent tx timeout */
491 bnx2_netif_start(struct bnx2 *bp)
493 if (atomic_dec_and_test(&bp->intr_sem)) {
494 if (netif_running(bp->dev)) {
495 netif_wake_queue(bp->dev);
496 bnx2_napi_enable(bp);
503 bnx2_free_tx_mem(struct bnx2 *bp)
507 for (i = 0; i < bp->num_tx_rings; i++) {
508 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
509 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
511 if (txr->tx_desc_ring) {
512 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
514 txr->tx_desc_mapping);
515 txr->tx_desc_ring = NULL;
517 kfree(txr->tx_buf_ring);
518 txr->tx_buf_ring = NULL;
523 bnx2_free_rx_mem(struct bnx2 *bp)
527 for (i = 0; i < bp->num_rx_rings; i++) {
528 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
532 for (j = 0; j < bp->rx_max_ring; j++) {
533 if (rxr->rx_desc_ring[j])
534 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535 rxr->rx_desc_ring[j],
536 rxr->rx_desc_mapping[j]);
537 rxr->rx_desc_ring[j] = NULL;
539 if (rxr->rx_buf_ring)
540 vfree(rxr->rx_buf_ring);
541 rxr->rx_buf_ring = NULL;
543 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544 if (rxr->rx_pg_desc_ring[j])
545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546 rxr->rx_pg_desc_ring[i],
547 rxr->rx_pg_desc_mapping[i]);
548 rxr->rx_pg_desc_ring[i] = NULL;
551 vfree(rxr->rx_pg_ring);
552 rxr->rx_pg_ring = NULL;
557 bnx2_alloc_tx_mem(struct bnx2 *bp)
561 for (i = 0; i < bp->num_tx_rings; i++) {
562 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
563 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
565 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
566 if (txr->tx_buf_ring == NULL)
570 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
571 &txr->tx_desc_mapping);
572 if (txr->tx_desc_ring == NULL)
579 bnx2_alloc_rx_mem(struct bnx2 *bp)
583 for (i = 0; i < bp->num_rx_rings; i++) {
584 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
585 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
589 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
590 if (rxr->rx_buf_ring == NULL)
593 memset(rxr->rx_buf_ring, 0,
594 SW_RXBD_RING_SIZE * bp->rx_max_ring);
596 for (j = 0; j < bp->rx_max_ring; j++) {
597 rxr->rx_desc_ring[j] =
598 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
599 &rxr->rx_desc_mapping[j]);
600 if (rxr->rx_desc_ring[j] == NULL)
605 if (bp->rx_pg_ring_size) {
606 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
608 if (rxr->rx_pg_ring == NULL)
611 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
615 for (j = 0; j < bp->rx_max_pg_ring; j++) {
616 rxr->rx_pg_desc_ring[j] =
617 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
618 &rxr->rx_pg_desc_mapping[j]);
619 if (rxr->rx_pg_desc_ring[j] == NULL)
628 bnx2_free_mem(struct bnx2 *bp)
631 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
633 bnx2_free_tx_mem(bp);
634 bnx2_free_rx_mem(bp);
636 for (i = 0; i < bp->ctx_pages; i++) {
637 if (bp->ctx_blk[i]) {
638 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
640 bp->ctx_blk_mapping[i]);
641 bp->ctx_blk[i] = NULL;
644 if (bnapi->status_blk.msi) {
645 pci_free_consistent(bp->pdev, bp->status_stats_size,
646 bnapi->status_blk.msi,
647 bp->status_blk_mapping);
648 bnapi->status_blk.msi = NULL;
649 bp->stats_blk = NULL;
654 bnx2_alloc_mem(struct bnx2 *bp)
656 int i, status_blk_size, err;
657 struct bnx2_napi *bnapi;
660 /* Combine status and statistics blocks into one allocation. */
661 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
662 if (bp->flags & BNX2_FLAG_MSIX_CAP)
663 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
664 BNX2_SBLK_MSIX_ALIGN_SIZE);
665 bp->status_stats_size = status_blk_size +
666 sizeof(struct statistics_block);
668 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
669 &bp->status_blk_mapping);
670 if (status_blk == NULL)
673 memset(status_blk, 0, bp->status_stats_size);
675 bnapi = &bp->bnx2_napi[0];
676 bnapi->status_blk.msi = status_blk;
677 bnapi->hw_tx_cons_ptr =
678 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
679 bnapi->hw_rx_cons_ptr =
680 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
681 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
682 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
683 struct status_block_msix *sblk;
685 bnapi = &bp->bnx2_napi[i];
687 sblk = (void *) (status_blk +
688 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
689 bnapi->status_blk.msix = sblk;
690 bnapi->hw_tx_cons_ptr =
691 &sblk->status_tx_quick_consumer_index;
692 bnapi->hw_rx_cons_ptr =
693 &sblk->status_rx_quick_consumer_index;
694 bnapi->int_num = i << 24;
698 bp->stats_blk = status_blk + status_blk_size;
700 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
702 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
703 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
704 if (bp->ctx_pages == 0)
706 for (i = 0; i < bp->ctx_pages; i++) {
707 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
709 &bp->ctx_blk_mapping[i]);
710 if (bp->ctx_blk[i] == NULL)
715 err = bnx2_alloc_rx_mem(bp);
719 err = bnx2_alloc_tx_mem(bp);
731 bnx2_report_fw_link(struct bnx2 *bp)
733 u32 fw_link_status = 0;
735 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
741 switch (bp->line_speed) {
743 if (bp->duplex == DUPLEX_HALF)
744 fw_link_status = BNX2_LINK_STATUS_10HALF;
746 fw_link_status = BNX2_LINK_STATUS_10FULL;
749 if (bp->duplex == DUPLEX_HALF)
750 fw_link_status = BNX2_LINK_STATUS_100HALF;
752 fw_link_status = BNX2_LINK_STATUS_100FULL;
755 if (bp->duplex == DUPLEX_HALF)
756 fw_link_status = BNX2_LINK_STATUS_1000HALF;
758 fw_link_status = BNX2_LINK_STATUS_1000FULL;
761 if (bp->duplex == DUPLEX_HALF)
762 fw_link_status = BNX2_LINK_STATUS_2500HALF;
764 fw_link_status = BNX2_LINK_STATUS_2500FULL;
768 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
771 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
773 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
774 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
776 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
777 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
778 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
780 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
784 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
786 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
790 bnx2_xceiver_str(struct bnx2 *bp)
792 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
793 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
798 bnx2_report_link(struct bnx2 *bp)
801 netif_carrier_on(bp->dev);
802 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
803 bnx2_xceiver_str(bp));
805 printk("%d Mbps ", bp->line_speed);
807 if (bp->duplex == DUPLEX_FULL)
808 printk("full duplex");
810 printk("half duplex");
813 if (bp->flow_ctrl & FLOW_CTRL_RX) {
814 printk(", receive ");
815 if (bp->flow_ctrl & FLOW_CTRL_TX)
816 printk("& transmit ");
819 printk(", transmit ");
821 printk("flow control ON");
826 netif_carrier_off(bp->dev);
827 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
828 bnx2_xceiver_str(bp));
831 bnx2_report_fw_link(bp);
835 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
837 u32 local_adv, remote_adv;
840 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
841 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
843 if (bp->duplex == DUPLEX_FULL) {
844 bp->flow_ctrl = bp->req_flow_ctrl;
849 if (bp->duplex != DUPLEX_FULL) {
853 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
854 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
857 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
859 bp->flow_ctrl |= FLOW_CTRL_TX;
860 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
861 bp->flow_ctrl |= FLOW_CTRL_RX;
865 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
866 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
868 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
869 u32 new_local_adv = 0;
870 u32 new_remote_adv = 0;
872 if (local_adv & ADVERTISE_1000XPAUSE)
873 new_local_adv |= ADVERTISE_PAUSE_CAP;
874 if (local_adv & ADVERTISE_1000XPSE_ASYM)
875 new_local_adv |= ADVERTISE_PAUSE_ASYM;
876 if (remote_adv & ADVERTISE_1000XPAUSE)
877 new_remote_adv |= ADVERTISE_PAUSE_CAP;
878 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
879 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
881 local_adv = new_local_adv;
882 remote_adv = new_remote_adv;
885 /* See Table 28B-3 of 802.3ab-1999 spec. */
886 if (local_adv & ADVERTISE_PAUSE_CAP) {
887 if(local_adv & ADVERTISE_PAUSE_ASYM) {
888 if (remote_adv & ADVERTISE_PAUSE_CAP) {
889 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
891 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
892 bp->flow_ctrl = FLOW_CTRL_RX;
896 if (remote_adv & ADVERTISE_PAUSE_CAP) {
897 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
901 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
902 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
903 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
905 bp->flow_ctrl = FLOW_CTRL_TX;
911 bnx2_5709s_linkup(struct bnx2 *bp)
917 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
918 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
919 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
921 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
922 bp->line_speed = bp->req_line_speed;
923 bp->duplex = bp->req_duplex;
926 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
928 case MII_BNX2_GP_TOP_AN_SPEED_10:
929 bp->line_speed = SPEED_10;
931 case MII_BNX2_GP_TOP_AN_SPEED_100:
932 bp->line_speed = SPEED_100;
934 case MII_BNX2_GP_TOP_AN_SPEED_1G:
935 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
936 bp->line_speed = SPEED_1000;
938 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
939 bp->line_speed = SPEED_2500;
942 if (val & MII_BNX2_GP_TOP_AN_FD)
943 bp->duplex = DUPLEX_FULL;
945 bp->duplex = DUPLEX_HALF;
950 bnx2_5708s_linkup(struct bnx2 *bp)
955 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
956 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
957 case BCM5708S_1000X_STAT1_SPEED_10:
958 bp->line_speed = SPEED_10;
960 case BCM5708S_1000X_STAT1_SPEED_100:
961 bp->line_speed = SPEED_100;
963 case BCM5708S_1000X_STAT1_SPEED_1G:
964 bp->line_speed = SPEED_1000;
966 case BCM5708S_1000X_STAT1_SPEED_2G5:
967 bp->line_speed = SPEED_2500;
970 if (val & BCM5708S_1000X_STAT1_FD)
971 bp->duplex = DUPLEX_FULL;
973 bp->duplex = DUPLEX_HALF;
979 bnx2_5706s_linkup(struct bnx2 *bp)
981 u32 bmcr, local_adv, remote_adv, common;
984 bp->line_speed = SPEED_1000;
986 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
987 if (bmcr & BMCR_FULLDPLX) {
988 bp->duplex = DUPLEX_FULL;
991 bp->duplex = DUPLEX_HALF;
994 if (!(bmcr & BMCR_ANENABLE)) {
998 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
999 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1001 common = local_adv & remote_adv;
1002 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1004 if (common & ADVERTISE_1000XFULL) {
1005 bp->duplex = DUPLEX_FULL;
1008 bp->duplex = DUPLEX_HALF;
1016 bnx2_copper_linkup(struct bnx2 *bp)
1020 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1021 if (bmcr & BMCR_ANENABLE) {
1022 u32 local_adv, remote_adv, common;
1024 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1025 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1027 common = local_adv & (remote_adv >> 2);
1028 if (common & ADVERTISE_1000FULL) {
1029 bp->line_speed = SPEED_1000;
1030 bp->duplex = DUPLEX_FULL;
1032 else if (common & ADVERTISE_1000HALF) {
1033 bp->line_speed = SPEED_1000;
1034 bp->duplex = DUPLEX_HALF;
1037 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1038 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1040 common = local_adv & remote_adv;
1041 if (common & ADVERTISE_100FULL) {
1042 bp->line_speed = SPEED_100;
1043 bp->duplex = DUPLEX_FULL;
1045 else if (common & ADVERTISE_100HALF) {
1046 bp->line_speed = SPEED_100;
1047 bp->duplex = DUPLEX_HALF;
1049 else if (common & ADVERTISE_10FULL) {
1050 bp->line_speed = SPEED_10;
1051 bp->duplex = DUPLEX_FULL;
1053 else if (common & ADVERTISE_10HALF) {
1054 bp->line_speed = SPEED_10;
1055 bp->duplex = DUPLEX_HALF;
1064 if (bmcr & BMCR_SPEED100) {
1065 bp->line_speed = SPEED_100;
1068 bp->line_speed = SPEED_10;
1070 if (bmcr & BMCR_FULLDPLX) {
1071 bp->duplex = DUPLEX_FULL;
1074 bp->duplex = DUPLEX_HALF;
1082 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1084 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1086 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1087 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1090 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1091 u32 lo_water, hi_water;
1093 if (bp->flow_ctrl & FLOW_CTRL_TX)
1094 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1096 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1097 if (lo_water >= bp->rx_ring_size)
1100 hi_water = bp->rx_ring_size / 4;
1102 if (hi_water <= lo_water)
1105 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1106 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1110 else if (hi_water == 0)
1112 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1114 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1118 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1123 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1126 bnx2_init_rx_context(bp, cid);
1131 bnx2_set_mac_link(struct bnx2 *bp)
1135 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1136 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1137 (bp->duplex == DUPLEX_HALF)) {
1138 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1141 /* Configure the EMAC mode register. */
1142 val = REG_RD(bp, BNX2_EMAC_MODE);
1144 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1145 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1146 BNX2_EMAC_MODE_25G_MODE);
1149 switch (bp->line_speed) {
1151 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1152 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1157 val |= BNX2_EMAC_MODE_PORT_MII;
1160 val |= BNX2_EMAC_MODE_25G_MODE;
1163 val |= BNX2_EMAC_MODE_PORT_GMII;
1168 val |= BNX2_EMAC_MODE_PORT_GMII;
1171 /* Set the MAC to operate in the appropriate duplex mode. */
1172 if (bp->duplex == DUPLEX_HALF)
1173 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1174 REG_WR(bp, BNX2_EMAC_MODE, val);
1176 /* Enable/disable rx PAUSE. */
1177 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1179 if (bp->flow_ctrl & FLOW_CTRL_RX)
1180 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1181 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1183 /* Enable/disable tx PAUSE. */
1184 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1185 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1187 if (bp->flow_ctrl & FLOW_CTRL_TX)
1188 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1189 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1191 /* Acknowledge the interrupt. */
1192 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1195 bnx2_init_all_rx_contexts(bp);
1201 bnx2_enable_bmsr1(struct bnx2 *bp)
1203 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1204 (CHIP_NUM(bp) == CHIP_NUM_5709))
1205 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1206 MII_BNX2_BLK_ADDR_GP_STATUS);
1210 bnx2_disable_bmsr1(struct bnx2 *bp)
1212 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1213 (CHIP_NUM(bp) == CHIP_NUM_5709))
1214 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1215 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1219 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1224 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1227 if (bp->autoneg & AUTONEG_SPEED)
1228 bp->advertising |= ADVERTISED_2500baseX_Full;
1230 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1231 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1233 bnx2_read_phy(bp, bp->mii_up1, &up1);
1234 if (!(up1 & BCM5708S_UP1_2G5)) {
1235 up1 |= BCM5708S_UP1_2G5;
1236 bnx2_write_phy(bp, bp->mii_up1, up1);
1240 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1241 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1242 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1248 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1253 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1256 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1257 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1259 bnx2_read_phy(bp, bp->mii_up1, &up1);
1260 if (up1 & BCM5708S_UP1_2G5) {
1261 up1 &= ~BCM5708S_UP1_2G5;
1262 bnx2_write_phy(bp, bp->mii_up1, up1);
1266 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1267 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1268 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1274 bnx2_enable_forced_2g5(struct bnx2 *bp)
1278 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1281 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1285 MII_BNX2_BLK_ADDR_SERDES_DIG);
1286 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1287 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1288 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1289 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1291 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1293 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1295 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1296 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297 bmcr |= BCM5708S_BMCR_FORCE_2500;
1300 if (bp->autoneg & AUTONEG_SPEED) {
1301 bmcr &= ~BMCR_ANENABLE;
1302 if (bp->req_duplex == DUPLEX_FULL)
1303 bmcr |= BMCR_FULLDPLX;
1305 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1309 bnx2_disable_forced_2g5(struct bnx2 *bp)
1313 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1316 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1320 MII_BNX2_BLK_ADDR_SERDES_DIG);
1321 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1322 val &= ~MII_BNX2_SD_MISC1_FORCE;
1323 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1325 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1327 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1329 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1330 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1334 if (bp->autoneg & AUTONEG_SPEED)
1335 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1336 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1340 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1344 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1345 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1347 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1349 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1353 bnx2_set_link(struct bnx2 *bp)
1358 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1363 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1366 link_up = bp->link_up;
1368 bnx2_enable_bmsr1(bp);
1369 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1370 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1371 bnx2_disable_bmsr1(bp);
1373 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1374 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1377 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1378 bnx2_5706s_force_link_dn(bp, 0);
1379 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1381 val = REG_RD(bp, BNX2_EMAC_STATUS);
1383 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1384 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1385 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1387 if ((val & BNX2_EMAC_STATUS_LINK) &&
1388 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1389 bmsr |= BMSR_LSTATUS;
1391 bmsr &= ~BMSR_LSTATUS;
1394 if (bmsr & BMSR_LSTATUS) {
1397 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1398 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1399 bnx2_5706s_linkup(bp);
1400 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1401 bnx2_5708s_linkup(bp);
1402 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1403 bnx2_5709s_linkup(bp);
1406 bnx2_copper_linkup(bp);
1408 bnx2_resolve_flow_ctrl(bp);
1411 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1412 (bp->autoneg & AUTONEG_SPEED))
1413 bnx2_disable_forced_2g5(bp);
1415 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1418 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1419 bmcr |= BMCR_ANENABLE;
1420 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1422 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1427 if (bp->link_up != link_up) {
1428 bnx2_report_link(bp);
1431 bnx2_set_mac_link(bp);
1437 bnx2_reset_phy(struct bnx2 *bp)
1442 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1444 #define PHY_RESET_MAX_WAIT 100
1445 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1448 bnx2_read_phy(bp, bp->mii_bmcr, ®);
1449 if (!(reg & BMCR_RESET)) {
1454 if (i == PHY_RESET_MAX_WAIT) {
1461 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1465 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1466 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1468 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1469 adv = ADVERTISE_1000XPAUSE;
1472 adv = ADVERTISE_PAUSE_CAP;
1475 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1476 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1477 adv = ADVERTISE_1000XPSE_ASYM;
1480 adv = ADVERTISE_PAUSE_ASYM;
1483 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1484 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1485 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1488 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1494 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1499 u32 speed_arg = 0, pause_adv;
1501 pause_adv = bnx2_phy_get_pause_adv(bp);
1503 if (bp->autoneg & AUTONEG_SPEED) {
1504 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1505 if (bp->advertising & ADVERTISED_10baseT_Half)
1506 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1507 if (bp->advertising & ADVERTISED_10baseT_Full)
1508 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1509 if (bp->advertising & ADVERTISED_100baseT_Half)
1510 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1511 if (bp->advertising & ADVERTISED_100baseT_Full)
1512 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1513 if (bp->advertising & ADVERTISED_1000baseT_Full)
1514 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1515 if (bp->advertising & ADVERTISED_2500baseX_Full)
1516 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1518 if (bp->req_line_speed == SPEED_2500)
1519 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1520 else if (bp->req_line_speed == SPEED_1000)
1521 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1522 else if (bp->req_line_speed == SPEED_100) {
1523 if (bp->req_duplex == DUPLEX_FULL)
1524 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1526 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1527 } else if (bp->req_line_speed == SPEED_10) {
1528 if (bp->req_duplex == DUPLEX_FULL)
1529 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1531 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1535 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1536 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1537 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1538 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1540 if (port == PORT_TP)
1541 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1542 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1544 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1546 spin_unlock_bh(&bp->phy_lock);
1547 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1548 spin_lock_bh(&bp->phy_lock);
1554 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1559 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1560 return (bnx2_setup_remote_phy(bp, port));
1562 if (!(bp->autoneg & AUTONEG_SPEED)) {
1564 int force_link_down = 0;
1566 if (bp->req_line_speed == SPEED_2500) {
1567 if (!bnx2_test_and_enable_2g5(bp))
1568 force_link_down = 1;
1569 } else if (bp->req_line_speed == SPEED_1000) {
1570 if (bnx2_test_and_disable_2g5(bp))
1571 force_link_down = 1;
1573 bnx2_read_phy(bp, bp->mii_adv, &adv);
1574 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1576 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1577 new_bmcr = bmcr & ~BMCR_ANENABLE;
1578 new_bmcr |= BMCR_SPEED1000;
1580 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1581 if (bp->req_line_speed == SPEED_2500)
1582 bnx2_enable_forced_2g5(bp);
1583 else if (bp->req_line_speed == SPEED_1000) {
1584 bnx2_disable_forced_2g5(bp);
1585 new_bmcr &= ~0x2000;
1588 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1589 if (bp->req_line_speed == SPEED_2500)
1590 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1592 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1595 if (bp->req_duplex == DUPLEX_FULL) {
1596 adv |= ADVERTISE_1000XFULL;
1597 new_bmcr |= BMCR_FULLDPLX;
1600 adv |= ADVERTISE_1000XHALF;
1601 new_bmcr &= ~BMCR_FULLDPLX;
1603 if ((new_bmcr != bmcr) || (force_link_down)) {
1604 /* Force a link down visible on the other side */
1606 bnx2_write_phy(bp, bp->mii_adv, adv &
1607 ~(ADVERTISE_1000XFULL |
1608 ADVERTISE_1000XHALF));
1609 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1610 BMCR_ANRESTART | BMCR_ANENABLE);
1613 netif_carrier_off(bp->dev);
1614 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1615 bnx2_report_link(bp);
1617 bnx2_write_phy(bp, bp->mii_adv, adv);
1618 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1620 bnx2_resolve_flow_ctrl(bp);
1621 bnx2_set_mac_link(bp);
1626 bnx2_test_and_enable_2g5(bp);
1628 if (bp->advertising & ADVERTISED_1000baseT_Full)
1629 new_adv |= ADVERTISE_1000XFULL;
1631 new_adv |= bnx2_phy_get_pause_adv(bp);
1633 bnx2_read_phy(bp, bp->mii_adv, &adv);
1634 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1636 bp->serdes_an_pending = 0;
1637 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1638 /* Force a link down visible on the other side */
1640 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1641 spin_unlock_bh(&bp->phy_lock);
1643 spin_lock_bh(&bp->phy_lock);
1646 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1647 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1649 /* Speed up link-up time when the link partner
1650 * does not autonegotiate which is very common
1651 * in blade servers. Some blade servers use
1652 * IPMI for kerboard input and it's important
1653 * to minimize link disruptions. Autoneg. involves
1654 * exchanging base pages plus 3 next pages and
1655 * normally completes in about 120 msec.
1657 bp->current_interval = SERDES_AN_TIMEOUT;
1658 bp->serdes_an_pending = 1;
1659 mod_timer(&bp->timer, jiffies + bp->current_interval);
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1668 #define ETHTOOL_ALL_FIBRE_SPEED \
1669 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1670 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1671 (ADVERTISED_1000baseT_Full)
1673 #define ETHTOOL_ALL_COPPER_SPEED \
1674 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1675 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1676 ADVERTISED_1000baseT_Full)
1678 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1679 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1681 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1684 bnx2_set_default_remote_link(struct bnx2 *bp)
1688 if (bp->phy_port == PORT_TP)
1689 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1691 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1693 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1694 bp->req_line_speed = 0;
1695 bp->autoneg |= AUTONEG_SPEED;
1696 bp->advertising = ADVERTISED_Autoneg;
1697 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698 bp->advertising |= ADVERTISED_10baseT_Half;
1699 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1700 bp->advertising |= ADVERTISED_10baseT_Full;
1701 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1702 bp->advertising |= ADVERTISED_100baseT_Half;
1703 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1704 bp->advertising |= ADVERTISED_100baseT_Full;
1705 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706 bp->advertising |= ADVERTISED_1000baseT_Full;
1707 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708 bp->advertising |= ADVERTISED_2500baseX_Full;
1711 bp->advertising = 0;
1712 bp->req_duplex = DUPLEX_FULL;
1713 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1714 bp->req_line_speed = SPEED_10;
1715 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1716 bp->req_duplex = DUPLEX_HALF;
1718 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1719 bp->req_line_speed = SPEED_100;
1720 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1721 bp->req_duplex = DUPLEX_HALF;
1723 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1724 bp->req_line_speed = SPEED_1000;
1725 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1726 bp->req_line_speed = SPEED_2500;
1731 bnx2_set_default_link(struct bnx2 *bp)
1733 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1734 bnx2_set_default_remote_link(bp);
1738 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1739 bp->req_line_speed = 0;
1740 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1743 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1745 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1746 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1747 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1749 bp->req_line_speed = bp->line_speed = SPEED_1000;
1750 bp->req_duplex = DUPLEX_FULL;
1753 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1757 bnx2_send_heart_beat(struct bnx2 *bp)
1762 spin_lock(&bp->indirect_lock);
1763 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1764 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1765 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1766 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1767 spin_unlock(&bp->indirect_lock);
1771 bnx2_remote_phy_event(struct bnx2 *bp)
1774 u8 link_up = bp->link_up;
1777 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1779 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1780 bnx2_send_heart_beat(bp);
1782 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1784 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1790 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1791 bp->duplex = DUPLEX_FULL;
1793 case BNX2_LINK_STATUS_10HALF:
1794 bp->duplex = DUPLEX_HALF;
1795 case BNX2_LINK_STATUS_10FULL:
1796 bp->line_speed = SPEED_10;
1798 case BNX2_LINK_STATUS_100HALF:
1799 bp->duplex = DUPLEX_HALF;
1800 case BNX2_LINK_STATUS_100BASE_T4:
1801 case BNX2_LINK_STATUS_100FULL:
1802 bp->line_speed = SPEED_100;
1804 case BNX2_LINK_STATUS_1000HALF:
1805 bp->duplex = DUPLEX_HALF;
1806 case BNX2_LINK_STATUS_1000FULL:
1807 bp->line_speed = SPEED_1000;
1809 case BNX2_LINK_STATUS_2500HALF:
1810 bp->duplex = DUPLEX_HALF;
1811 case BNX2_LINK_STATUS_2500FULL:
1812 bp->line_speed = SPEED_2500;
1820 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1821 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1822 if (bp->duplex == DUPLEX_FULL)
1823 bp->flow_ctrl = bp->req_flow_ctrl;
1825 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1826 bp->flow_ctrl |= FLOW_CTRL_TX;
1827 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1828 bp->flow_ctrl |= FLOW_CTRL_RX;
1831 old_port = bp->phy_port;
1832 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1833 bp->phy_port = PORT_FIBRE;
1835 bp->phy_port = PORT_TP;
1837 if (old_port != bp->phy_port)
1838 bnx2_set_default_link(bp);
1841 if (bp->link_up != link_up)
1842 bnx2_report_link(bp);
1844 bnx2_set_mac_link(bp);
1848 bnx2_set_remote_link(struct bnx2 *bp)
1852 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1854 case BNX2_FW_EVT_CODE_LINK_EVENT:
1855 bnx2_remote_phy_event(bp);
1857 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1859 bnx2_send_heart_beat(bp);
1866 bnx2_setup_copper_phy(struct bnx2 *bp)
1871 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1873 if (bp->autoneg & AUTONEG_SPEED) {
1874 u32 adv_reg, adv1000_reg;
1875 u32 new_adv_reg = 0;
1876 u32 new_adv1000_reg = 0;
1878 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1879 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1880 ADVERTISE_PAUSE_ASYM);
1882 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1883 adv1000_reg &= PHY_ALL_1000_SPEED;
1885 if (bp->advertising & ADVERTISED_10baseT_Half)
1886 new_adv_reg |= ADVERTISE_10HALF;
1887 if (bp->advertising & ADVERTISED_10baseT_Full)
1888 new_adv_reg |= ADVERTISE_10FULL;
1889 if (bp->advertising & ADVERTISED_100baseT_Half)
1890 new_adv_reg |= ADVERTISE_100HALF;
1891 if (bp->advertising & ADVERTISED_100baseT_Full)
1892 new_adv_reg |= ADVERTISE_100FULL;
1893 if (bp->advertising & ADVERTISED_1000baseT_Full)
1894 new_adv1000_reg |= ADVERTISE_1000FULL;
1896 new_adv_reg |= ADVERTISE_CSMA;
1898 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1900 if ((adv1000_reg != new_adv1000_reg) ||
1901 (adv_reg != new_adv_reg) ||
1902 ((bmcr & BMCR_ANENABLE) == 0)) {
1904 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1905 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1906 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1909 else if (bp->link_up) {
1910 /* Flow ctrl may have changed from auto to forced */
1911 /* or vice-versa. */
1913 bnx2_resolve_flow_ctrl(bp);
1914 bnx2_set_mac_link(bp);
1920 if (bp->req_line_speed == SPEED_100) {
1921 new_bmcr |= BMCR_SPEED100;
1923 if (bp->req_duplex == DUPLEX_FULL) {
1924 new_bmcr |= BMCR_FULLDPLX;
1926 if (new_bmcr != bmcr) {
1929 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1930 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1932 if (bmsr & BMSR_LSTATUS) {
1933 /* Force link down */
1934 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1935 spin_unlock_bh(&bp->phy_lock);
1937 spin_lock_bh(&bp->phy_lock);
1939 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1940 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1943 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1945 /* Normally, the new speed is setup after the link has
1946 * gone down and up again. In some cases, link will not go
1947 * down so we need to set up the new speed here.
1949 if (bmsr & BMSR_LSTATUS) {
1950 bp->line_speed = bp->req_line_speed;
1951 bp->duplex = bp->req_duplex;
1952 bnx2_resolve_flow_ctrl(bp);
1953 bnx2_set_mac_link(bp);
1956 bnx2_resolve_flow_ctrl(bp);
1957 bnx2_set_mac_link(bp);
1963 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1965 if (bp->loopback == MAC_LOOPBACK)
1968 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1969 return (bnx2_setup_serdes_phy(bp, port));
1972 return (bnx2_setup_copper_phy(bp));
1977 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1981 bp->mii_bmcr = MII_BMCR + 0x10;
1982 bp->mii_bmsr = MII_BMSR + 0x10;
1983 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1984 bp->mii_adv = MII_ADVERTISE + 0x10;
1985 bp->mii_lpa = MII_LPA + 0x10;
1986 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1988 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1989 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1991 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1995 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1997 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1998 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1999 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2000 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2002 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2003 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2004 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2005 val |= BCM5708S_UP1_2G5;
2007 val &= ~BCM5708S_UP1_2G5;
2008 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2011 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2012 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2013 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2015 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2017 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2018 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2019 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2021 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2027 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2034 bp->mii_up1 = BCM5708S_UP1;
2036 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2037 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2038 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2040 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2041 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2042 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2044 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2045 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2046 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2048 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2049 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2050 val |= BCM5708S_UP1_2G5;
2051 bnx2_write_phy(bp, BCM5708S_UP1, val);
2054 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2055 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2056 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2057 /* increase tx signal amplitude */
2058 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2059 BCM5708S_BLK_ADDR_TX_MISC);
2060 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2061 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2062 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2063 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2066 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2067 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2072 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2073 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2074 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2075 BCM5708S_BLK_ADDR_TX_MISC);
2076 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2077 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2078 BCM5708S_BLK_ADDR_DIG);
2085 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2090 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2092 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2093 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2095 if (bp->dev->mtu > 1500) {
2098 /* Set extended packet length bit */
2099 bnx2_write_phy(bp, 0x18, 0x7);
2100 bnx2_read_phy(bp, 0x18, &val);
2101 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2103 bnx2_write_phy(bp, 0x1c, 0x6c00);
2104 bnx2_read_phy(bp, 0x1c, &val);
2105 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2110 bnx2_write_phy(bp, 0x18, 0x7);
2111 bnx2_read_phy(bp, 0x18, &val);
2112 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2114 bnx2_write_phy(bp, 0x1c, 0x6c00);
2115 bnx2_read_phy(bp, 0x1c, &val);
2116 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2123 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2130 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2131 bnx2_write_phy(bp, 0x18, 0x0c00);
2132 bnx2_write_phy(bp, 0x17, 0x000a);
2133 bnx2_write_phy(bp, 0x15, 0x310b);
2134 bnx2_write_phy(bp, 0x17, 0x201f);
2135 bnx2_write_phy(bp, 0x15, 0x9506);
2136 bnx2_write_phy(bp, 0x17, 0x401f);
2137 bnx2_write_phy(bp, 0x15, 0x14e2);
2138 bnx2_write_phy(bp, 0x18, 0x0400);
2141 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2142 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2143 MII_BNX2_DSP_EXPAND_REG | 0x8);
2144 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2146 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2149 if (bp->dev->mtu > 1500) {
2150 /* Set extended packet length bit */
2151 bnx2_write_phy(bp, 0x18, 0x7);
2152 bnx2_read_phy(bp, 0x18, &val);
2153 bnx2_write_phy(bp, 0x18, val | 0x4000);
2155 bnx2_read_phy(bp, 0x10, &val);
2156 bnx2_write_phy(bp, 0x10, val | 0x1);
2159 bnx2_write_phy(bp, 0x18, 0x7);
2160 bnx2_read_phy(bp, 0x18, &val);
2161 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2163 bnx2_read_phy(bp, 0x10, &val);
2164 bnx2_write_phy(bp, 0x10, val & ~0x1);
2167 /* ethernet@wirespeed */
2168 bnx2_write_phy(bp, 0x18, 0x7007);
2169 bnx2_read_phy(bp, 0x18, &val);
2170 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2176 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2181 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2182 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2184 bp->mii_bmcr = MII_BMCR;
2185 bp->mii_bmsr = MII_BMSR;
2186 bp->mii_bmsr1 = MII_BMSR;
2187 bp->mii_adv = MII_ADVERTISE;
2188 bp->mii_lpa = MII_LPA;
2190 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2192 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2195 bnx2_read_phy(bp, MII_PHYSID1, &val);
2196 bp->phy_id = val << 16;
2197 bnx2_read_phy(bp, MII_PHYSID2, &val);
2198 bp->phy_id |= val & 0xffff;
2200 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2201 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2202 rc = bnx2_init_5706s_phy(bp, reset_phy);
2203 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2204 rc = bnx2_init_5708s_phy(bp, reset_phy);
2205 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2206 rc = bnx2_init_5709s_phy(bp, reset_phy);
2209 rc = bnx2_init_copper_phy(bp, reset_phy);
2214 rc = bnx2_setup_phy(bp, bp->phy_port);
2220 bnx2_set_mac_loopback(struct bnx2 *bp)
2224 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2225 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2226 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2227 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2232 static int bnx2_test_link(struct bnx2 *);
2235 bnx2_set_phy_loopback(struct bnx2 *bp)
2240 spin_lock_bh(&bp->phy_lock);
2241 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2243 spin_unlock_bh(&bp->phy_lock);
2247 for (i = 0; i < 10; i++) {
2248 if (bnx2_test_link(bp) == 0)
2253 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2254 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2255 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2256 BNX2_EMAC_MODE_25G_MODE);
2258 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2259 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2265 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2271 msg_data |= bp->fw_wr_seq;
2273 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2278 /* wait for an acknowledgement. */
2279 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2282 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2284 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2287 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2290 /* If we timed out, inform the firmware that this is the case. */
2291 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2293 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2296 msg_data &= ~BNX2_DRV_MSG_CODE;
2297 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2299 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2304 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2311 bnx2_init_5709_context(struct bnx2 *bp)
2316 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2317 val |= (BCM_PAGE_BITS - 8) << 16;
2318 REG_WR(bp, BNX2_CTX_COMMAND, val);
2319 for (i = 0; i < 10; i++) {
2320 val = REG_RD(bp, BNX2_CTX_COMMAND);
2321 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2325 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2328 for (i = 0; i < bp->ctx_pages; i++) {
2332 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2336 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2337 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2338 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2339 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2340 (u64) bp->ctx_blk_mapping[i] >> 32);
2341 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2342 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2343 for (j = 0; j < 10; j++) {
2345 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2346 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2350 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2359 bnx2_init_context(struct bnx2 *bp)
2365 u32 vcid_addr, pcid_addr, offset;
2370 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2373 vcid_addr = GET_PCID_ADDR(vcid);
2375 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2380 pcid_addr = GET_PCID_ADDR(new_vcid);
2383 vcid_addr = GET_CID_ADDR(vcid);
2384 pcid_addr = vcid_addr;
2387 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2388 vcid_addr += (i << PHY_CTX_SHIFT);
2389 pcid_addr += (i << PHY_CTX_SHIFT);
2391 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2392 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2394 /* Zero out the context. */
2395 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2396 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2402 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2408 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2409 if (good_mbuf == NULL) {
2410 printk(KERN_ERR PFX "Failed to allocate memory in "
2411 "bnx2_alloc_bad_rbuf\n");
2415 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2416 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2420 /* Allocate a bunch of mbufs and save the good ones in an array. */
2421 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2422 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2423 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2424 BNX2_RBUF_COMMAND_ALLOC_REQ);
2426 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2428 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2430 /* The addresses with Bit 9 set are bad memory blocks. */
2431 if (!(val & (1 << 9))) {
2432 good_mbuf[good_mbuf_cnt] = (u16) val;
2436 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2439 /* Free the good ones back to the mbuf pool thus discarding
2440 * all the bad ones. */
2441 while (good_mbuf_cnt) {
2444 val = good_mbuf[good_mbuf_cnt];
2445 val = (val << 9) | val | 1;
2447 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2454 bnx2_set_mac_addr(struct bnx2 *bp)
2457 u8 *mac_addr = bp->dev->dev_addr;
2459 val = (mac_addr[0] << 8) | mac_addr[1];
2461 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2463 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2464 (mac_addr[4] << 8) | mac_addr[5];
2466 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2470 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2473 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2474 struct rx_bd *rxbd =
2475 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2476 struct page *page = alloc_page(GFP_ATOMIC);
2480 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2481 PCI_DMA_FROMDEVICE);
2483 pci_unmap_addr_set(rx_pg, mapping, mapping);
2484 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2485 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2490 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2492 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2493 struct page *page = rx_pg->page;
2498 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2499 PCI_DMA_FROMDEVICE);
2506 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2508 struct sk_buff *skb;
2509 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2511 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2512 unsigned long align;
2514 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2519 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2520 skb_reserve(skb, BNX2_RX_ALIGN - align);
2522 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2523 PCI_DMA_FROMDEVICE);
2526 pci_unmap_addr_set(rx_buf, mapping, mapping);
2528 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2529 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2531 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2537 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2539 struct status_block *sblk = bnapi->status_blk.msi;
2540 u32 new_link_state, old_link_state;
2543 new_link_state = sblk->status_attn_bits & event;
2544 old_link_state = sblk->status_attn_bits_ack & event;
2545 if (new_link_state != old_link_state) {
2547 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2549 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2557 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2559 spin_lock(&bp->phy_lock);
2561 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2563 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2564 bnx2_set_remote_link(bp);
2566 spin_unlock(&bp->phy_lock);
2571 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2575 /* Tell compiler that status block fields can change. */
2577 cons = *bnapi->hw_tx_cons_ptr;
2578 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2584 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2586 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2587 u16 hw_cons, sw_cons, sw_ring_cons;
2590 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2591 sw_cons = txr->tx_cons;
2593 while (sw_cons != hw_cons) {
2594 struct sw_bd *tx_buf;
2595 struct sk_buff *skb;
2598 sw_ring_cons = TX_RING_IDX(sw_cons);
2600 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2603 /* partial BD completions possible with TSO packets */
2604 if (skb_is_gso(skb)) {
2605 u16 last_idx, last_ring_idx;
2607 last_idx = sw_cons +
2608 skb_shinfo(skb)->nr_frags + 1;
2609 last_ring_idx = sw_ring_cons +
2610 skb_shinfo(skb)->nr_frags + 1;
2611 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2614 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2619 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2620 skb_headlen(skb), PCI_DMA_TODEVICE);
2623 last = skb_shinfo(skb)->nr_frags;
2625 for (i = 0; i < last; i++) {
2626 sw_cons = NEXT_TX_BD(sw_cons);
2628 pci_unmap_page(bp->pdev,
2630 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2632 skb_shinfo(skb)->frags[i].size,
2636 sw_cons = NEXT_TX_BD(sw_cons);
2640 if (tx_pkt == budget)
2643 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2646 txr->hw_tx_cons = hw_cons;
2647 txr->tx_cons = sw_cons;
2648 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2649 * before checking for netif_queue_stopped(). Without the
2650 * memory barrier, there is a small possibility that bnx2_start_xmit()
2651 * will miss it and cause the queue to be stopped forever.
2655 if (unlikely(netif_queue_stopped(bp->dev)) &&
2656 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2657 netif_tx_lock(bp->dev);
2658 if ((netif_queue_stopped(bp->dev)) &&
2659 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2660 netif_wake_queue(bp->dev);
2661 netif_tx_unlock(bp->dev);
2667 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2668 struct sk_buff *skb, int count)
2670 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2671 struct rx_bd *cons_bd, *prod_bd;
2674 u16 hw_prod = rxr->rx_pg_prod, prod;
2675 u16 cons = rxr->rx_pg_cons;
2677 for (i = 0; i < count; i++) {
2678 prod = RX_PG_RING_IDX(hw_prod);
2680 prod_rx_pg = &rxr->rx_pg_ring[prod];
2681 cons_rx_pg = &rxr->rx_pg_ring[cons];
2682 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2683 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2685 if (i == 0 && skb) {
2687 struct skb_shared_info *shinfo;
2689 shinfo = skb_shinfo(skb);
2691 page = shinfo->frags[shinfo->nr_frags].page;
2692 shinfo->frags[shinfo->nr_frags].page = NULL;
2693 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2694 PCI_DMA_FROMDEVICE);
2695 cons_rx_pg->page = page;
2696 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2700 prod_rx_pg->page = cons_rx_pg->page;
2701 cons_rx_pg->page = NULL;
2702 pci_unmap_addr_set(prod_rx_pg, mapping,
2703 pci_unmap_addr(cons_rx_pg, mapping));
2705 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2706 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2709 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2710 hw_prod = NEXT_RX_BD(hw_prod);
2712 rxr->rx_pg_prod = hw_prod;
2713 rxr->rx_pg_cons = cons;
2717 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2718 struct sk_buff *skb, u16 cons, u16 prod)
2720 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2721 struct rx_bd *cons_bd, *prod_bd;
2723 cons_rx_buf = &rxr->rx_buf_ring[cons];
2724 prod_rx_buf = &rxr->rx_buf_ring[prod];
2726 pci_dma_sync_single_for_device(bp->pdev,
2727 pci_unmap_addr(cons_rx_buf, mapping),
2728 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2730 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2732 prod_rx_buf->skb = skb;
2737 pci_unmap_addr_set(prod_rx_buf, mapping,
2738 pci_unmap_addr(cons_rx_buf, mapping));
2740 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2741 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2742 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2743 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2747 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2748 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2752 u16 prod = ring_idx & 0xffff;
2754 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2755 if (unlikely(err)) {
2756 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2758 unsigned int raw_len = len + 4;
2759 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2761 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2766 skb_reserve(skb, BNX2_RX_OFFSET);
2767 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2768 PCI_DMA_FROMDEVICE);
2774 unsigned int i, frag_len, frag_size, pages;
2775 struct sw_pg *rx_pg;
2776 u16 pg_cons = rxr->rx_pg_cons;
2777 u16 pg_prod = rxr->rx_pg_prod;
2779 frag_size = len + 4 - hdr_len;
2780 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2781 skb_put(skb, hdr_len);
2783 for (i = 0; i < pages; i++) {
2784 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2785 if (unlikely(frag_len <= 4)) {
2786 unsigned int tail = 4 - frag_len;
2788 rxr->rx_pg_cons = pg_cons;
2789 rxr->rx_pg_prod = pg_prod;
2790 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2797 &skb_shinfo(skb)->frags[i - 1];
2799 skb->data_len -= tail;
2800 skb->truesize -= tail;
2804 rx_pg = &rxr->rx_pg_ring[pg_cons];
2806 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2807 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2812 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2815 err = bnx2_alloc_rx_page(bp, rxr,
2816 RX_PG_RING_IDX(pg_prod));
2817 if (unlikely(err)) {
2818 rxr->rx_pg_cons = pg_cons;
2819 rxr->rx_pg_prod = pg_prod;
2820 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2825 frag_size -= frag_len;
2826 skb->data_len += frag_len;
2827 skb->truesize += frag_len;
2828 skb->len += frag_len;
2830 pg_prod = NEXT_RX_BD(pg_prod);
2831 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2833 rxr->rx_pg_prod = pg_prod;
2834 rxr->rx_pg_cons = pg_cons;
2840 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2844 /* Tell compiler that status block fields can change. */
2846 cons = *bnapi->hw_rx_cons_ptr;
2847 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2853 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2855 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2856 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2857 struct l2_fhdr *rx_hdr;
2858 int rx_pkt = 0, pg_ring_used = 0;
2860 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2861 sw_cons = rxr->rx_cons;
2862 sw_prod = rxr->rx_prod;
2864 /* Memory barrier necessary as speculative reads of the rx
2865 * buffer can be ahead of the index in the status block
2868 while (sw_cons != hw_cons) {
2869 unsigned int len, hdr_len;
2871 struct sw_bd *rx_buf;
2872 struct sk_buff *skb;
2873 dma_addr_t dma_addr;
2875 sw_ring_cons = RX_RING_IDX(sw_cons);
2876 sw_ring_prod = RX_RING_IDX(sw_prod);
2878 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2883 dma_addr = pci_unmap_addr(rx_buf, mapping);
2885 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2886 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2887 PCI_DMA_FROMDEVICE);
2889 rx_hdr = (struct l2_fhdr *) skb->data;
2890 len = rx_hdr->l2_fhdr_pkt_len;
2892 if ((status = rx_hdr->l2_fhdr_status) &
2893 (L2_FHDR_ERRORS_BAD_CRC |
2894 L2_FHDR_ERRORS_PHY_DECODE |
2895 L2_FHDR_ERRORS_ALIGNMENT |
2896 L2_FHDR_ERRORS_TOO_SHORT |
2897 L2_FHDR_ERRORS_GIANT_FRAME)) {
2899 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2904 if (status & L2_FHDR_STATUS_SPLIT) {
2905 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2907 } else if (len > bp->rx_jumbo_thresh) {
2908 hdr_len = bp->rx_jumbo_thresh;
2914 if (len <= bp->rx_copy_thresh) {
2915 struct sk_buff *new_skb;
2917 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2918 if (new_skb == NULL) {
2919 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2925 skb_copy_from_linear_data_offset(skb,
2927 new_skb->data, len + 2);
2928 skb_reserve(new_skb, 2);
2929 skb_put(new_skb, len);
2931 bnx2_reuse_rx_skb(bp, rxr, skb,
2932 sw_ring_cons, sw_ring_prod);
2935 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2936 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2939 skb->protocol = eth_type_trans(skb, bp->dev);
2941 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2942 (ntohs(skb->protocol) != 0x8100)) {
2949 skb->ip_summed = CHECKSUM_NONE;
2951 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2952 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2954 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2955 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2956 skb->ip_summed = CHECKSUM_UNNECESSARY;
2960 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2961 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2962 rx_hdr->l2_fhdr_vlan_tag);
2966 netif_receive_skb(skb);
2968 bp->dev->last_rx = jiffies;
2972 sw_cons = NEXT_RX_BD(sw_cons);
2973 sw_prod = NEXT_RX_BD(sw_prod);
2975 if ((rx_pkt == budget))
2978 /* Refresh hw_cons to see if there is new work */
2979 if (sw_cons == hw_cons) {
2980 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2984 rxr->rx_cons = sw_cons;
2985 rxr->rx_prod = sw_prod;
2988 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2990 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2992 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3000 /* MSI ISR - The only difference between this and the INTx ISR
3001 * is that the MSI interrupt is always serviced.
3004 bnx2_msi(int irq, void *dev_instance)
3006 struct bnx2_napi *bnapi = dev_instance;
3007 struct bnx2 *bp = bnapi->bp;
3008 struct net_device *dev = bp->dev;
3010 prefetch(bnapi->status_blk.msi);
3011 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3012 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3013 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3015 /* Return here if interrupt is disabled. */
3016 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3019 netif_rx_schedule(dev, &bnapi->napi);
3025 bnx2_msi_1shot(int irq, void *dev_instance)
3027 struct bnx2_napi *bnapi = dev_instance;
3028 struct bnx2 *bp = bnapi->bp;
3029 struct net_device *dev = bp->dev;
3031 prefetch(bnapi->status_blk.msi);
3033 /* Return here if interrupt is disabled. */
3034 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3037 netif_rx_schedule(dev, &bnapi->napi);
3043 bnx2_interrupt(int irq, void *dev_instance)
3045 struct bnx2_napi *bnapi = dev_instance;
3046 struct bnx2 *bp = bnapi->bp;
3047 struct net_device *dev = bp->dev;
3048 struct status_block *sblk = bnapi->status_blk.msi;
3050 /* When using INTx, it is possible for the interrupt to arrive
3051 * at the CPU before the status block posted prior to the
3052 * interrupt. Reading a register will flush the status block.
3053 * When using MSI, the MSI message will always complete after
3054 * the status block write.
3056 if ((sblk->status_idx == bnapi->last_status_idx) &&
3057 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3058 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3061 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3062 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3063 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3065 /* Read back to deassert IRQ immediately to avoid too many
3066 * spurious interrupts.
3068 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3070 /* Return here if interrupt is shared and is disabled. */
3071 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3074 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
3075 bnapi->last_status_idx = sblk->status_idx;
3076 __netif_rx_schedule(dev, &bnapi->napi);
3083 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3085 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3086 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3088 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3089 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3094 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3095 STATUS_ATTN_BITS_TIMER_ABORT)
3098 bnx2_has_work(struct bnx2_napi *bnapi)
3100 struct status_block *sblk = bnapi->status_blk.msi;
3102 if (bnx2_has_fast_work(bnapi))
3105 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3106 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3112 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3114 struct status_block *sblk = bnapi->status_blk.msi;
3115 u32 status_attn_bits = sblk->status_attn_bits;
3116 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3118 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3119 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3121 bnx2_phy_int(bp, bnapi);
3123 /* This is needed to take care of transient status
3124 * during link changes.
3126 REG_WR(bp, BNX2_HC_COMMAND,
3127 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3128 REG_RD(bp, BNX2_HC_COMMAND);
3132 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3133 int work_done, int budget)
3135 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3136 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3138 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3139 bnx2_tx_int(bp, bnapi, 0);
3141 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3142 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3147 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3149 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3150 struct bnx2 *bp = bnapi->bp;
3152 struct status_block_msix *sblk = bnapi->status_blk.msix;
3155 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3156 if (unlikely(work_done >= budget))
3159 bnapi->last_status_idx = sblk->status_idx;
3160 /* status idx must be read before checking for more work. */
3162 if (likely(!bnx2_has_fast_work(bnapi))) {
3164 netif_rx_complete(bp->dev, napi);
3165 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3166 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3167 bnapi->last_status_idx);
3174 static int bnx2_poll(struct napi_struct *napi, int budget)
3176 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3177 struct bnx2 *bp = bnapi->bp;
3179 struct status_block *sblk = bnapi->status_blk.msi;
3182 bnx2_poll_link(bp, bnapi);
3184 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3186 if (unlikely(work_done >= budget))
3189 /* bnapi->last_status_idx is used below to tell the hw how
3190 * much work has been processed, so we must read it before
3191 * checking for more work.
3193 bnapi->last_status_idx = sblk->status_idx;
3195 if (likely(!bnx2_has_work(bnapi))) {
3196 netif_rx_complete(bp->dev, napi);
3197 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3198 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3199 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3200 bnapi->last_status_idx);
3203 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3204 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3205 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3206 bnapi->last_status_idx);
3208 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3209 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3210 bnapi->last_status_idx);
3218 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3219 * from set_multicast.
3222 bnx2_set_rx_mode(struct net_device *dev)
3224 struct bnx2 *bp = netdev_priv(dev);
3225 u32 rx_mode, sort_mode;
3228 spin_lock_bh(&bp->phy_lock);
3230 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3231 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3232 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3234 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3235 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3237 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3238 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3240 if (dev->flags & IFF_PROMISC) {
3241 /* Promiscuous mode. */
3242 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3243 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3244 BNX2_RPM_SORT_USER0_PROM_VLAN;
3246 else if (dev->flags & IFF_ALLMULTI) {
3247 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3248 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3251 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3254 /* Accept one or more multicast(s). */
3255 struct dev_mc_list *mclist;
3256 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3261 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3263 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3264 i++, mclist = mclist->next) {
3266 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3268 regidx = (bit & 0xe0) >> 5;
3270 mc_filter[regidx] |= (1 << bit);
3273 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3274 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3278 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3281 if (rx_mode != bp->rx_mode) {
3282 bp->rx_mode = rx_mode;
3283 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3286 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3287 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3288 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3290 spin_unlock_bh(&bp->phy_lock);
3294 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3300 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3301 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3302 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3303 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3304 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3307 for (i = 0; i < rv2p_code_len; i += 8) {
3308 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3310 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3313 if (rv2p_proc == RV2P_PROC1) {
3314 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3315 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3318 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3319 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3323 /* Reset the processor, un-stall is done later. */
3324 if (rv2p_proc == RV2P_PROC1) {
3325 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3328 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3333 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3340 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3341 val |= cpu_reg->mode_value_halt;
3342 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3343 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3345 /* Load the Text area. */
3346 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3350 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3355 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3356 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3360 /* Load the Data area. */
3361 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3365 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3366 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3370 /* Load the SBSS area. */
3371 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3375 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3376 bnx2_reg_wr_ind(bp, offset, 0);
3380 /* Load the BSS area. */
3381 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3385 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3386 bnx2_reg_wr_ind(bp, offset, 0);
3390 /* Load the Read-Only area. */
3391 offset = cpu_reg->spad_base +
3392 (fw->rodata_addr - cpu_reg->mips_view_base);
3396 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3397 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3401 /* Clear the pre-fetch instruction. */
3402 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3403 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3405 /* Start the CPU. */
3406 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3407 val &= ~cpu_reg->mode_value_halt;
3408 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3409 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3415 bnx2_init_cpus(struct bnx2 *bp)
3421 /* Initialize the RV2P processor. */
3422 text = vmalloc(FW_BUF_SIZE);
3425 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3426 rv2p = bnx2_xi_rv2p_proc1;
3427 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3429 rv2p = bnx2_rv2p_proc1;
3430 rv2p_len = sizeof(bnx2_rv2p_proc1);
3432 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3436 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3438 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3439 rv2p = bnx2_xi_rv2p_proc2;
3440 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3442 rv2p = bnx2_rv2p_proc2;
3443 rv2p_len = sizeof(bnx2_rv2p_proc2);
3445 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3449 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3451 /* Initialize the RX Processor. */
3452 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3453 fw = &bnx2_rxp_fw_09;
3455 fw = &bnx2_rxp_fw_06;
3458 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3462 /* Initialize the TX Processor. */
3463 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3464 fw = &bnx2_txp_fw_09;
3466 fw = &bnx2_txp_fw_06;
3469 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3473 /* Initialize the TX Patch-up Processor. */
3474 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3475 fw = &bnx2_tpat_fw_09;
3477 fw = &bnx2_tpat_fw_06;
3480 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3484 /* Initialize the Completion Processor. */
3485 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3486 fw = &bnx2_com_fw_09;
3488 fw = &bnx2_com_fw_06;
3491 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3495 /* Initialize the Command Processor. */
3496 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3497 fw = &bnx2_cp_fw_09;
3499 fw = &bnx2_cp_fw_06;
3502 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3510 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3514 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3520 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3521 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3522 PCI_PM_CTRL_PME_STATUS);
3524 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3525 /* delay required during transition out of D3hot */
3528 val = REG_RD(bp, BNX2_EMAC_MODE);
3529 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3530 val &= ~BNX2_EMAC_MODE_MPKT;
3531 REG_WR(bp, BNX2_EMAC_MODE, val);
3533 val = REG_RD(bp, BNX2_RPM_CONFIG);
3534 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3535 REG_WR(bp, BNX2_RPM_CONFIG, val);
3546 autoneg = bp->autoneg;
3547 advertising = bp->advertising;
3549 if (bp->phy_port == PORT_TP) {
3550 bp->autoneg = AUTONEG_SPEED;
3551 bp->advertising = ADVERTISED_10baseT_Half |
3552 ADVERTISED_10baseT_Full |
3553 ADVERTISED_100baseT_Half |
3554 ADVERTISED_100baseT_Full |
3558 spin_lock_bh(&bp->phy_lock);
3559 bnx2_setup_phy(bp, bp->phy_port);
3560 spin_unlock_bh(&bp->phy_lock);
3562 bp->autoneg = autoneg;
3563 bp->advertising = advertising;
3565 bnx2_set_mac_addr(bp);
3567 val = REG_RD(bp, BNX2_EMAC_MODE);
3569 /* Enable port mode. */
3570 val &= ~BNX2_EMAC_MODE_PORT;
3571 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3572 BNX2_EMAC_MODE_ACPI_RCVD |
3573 BNX2_EMAC_MODE_MPKT;
3574 if (bp->phy_port == PORT_TP)
3575 val |= BNX2_EMAC_MODE_PORT_MII;
3577 val |= BNX2_EMAC_MODE_PORT_GMII;
3578 if (bp->line_speed == SPEED_2500)
3579 val |= BNX2_EMAC_MODE_25G_MODE;
3582 REG_WR(bp, BNX2_EMAC_MODE, val);
3584 /* receive all multicast */
3585 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3586 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3589 REG_WR(bp, BNX2_EMAC_RX_MODE,
3590 BNX2_EMAC_RX_MODE_SORT_MODE);
3592 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3593 BNX2_RPM_SORT_USER0_MC_EN;
3594 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3595 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3596 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3597 BNX2_RPM_SORT_USER0_ENA);
3599 /* Need to enable EMAC and RPM for WOL. */
3600 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3601 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3602 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3603 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3605 val = REG_RD(bp, BNX2_RPM_CONFIG);
3606 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3607 REG_WR(bp, BNX2_RPM_CONFIG, val);
3609 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3612 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3615 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3616 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3619 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3620 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3621 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3630 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3632 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3635 /* No more memory access after this point until
3636 * device is brought back to D0.
3648 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3653 /* Request access to the flash interface. */
3654 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3655 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3656 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3657 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3663 if (j >= NVRAM_TIMEOUT_COUNT)
3670 bnx2_release_nvram_lock(struct bnx2 *bp)
3675 /* Relinquish nvram interface. */
3676 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3678 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3679 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3680 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3686 if (j >= NVRAM_TIMEOUT_COUNT)
3694 bnx2_enable_nvram_write(struct bnx2 *bp)
3698 val = REG_RD(bp, BNX2_MISC_CFG);
3699 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3701 if (bp->flash_info->flags & BNX2_NV_WREN) {
3704 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3705 REG_WR(bp, BNX2_NVM_COMMAND,
3706 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3708 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3711 val = REG_RD(bp, BNX2_NVM_COMMAND);
3712 if (val & BNX2_NVM_COMMAND_DONE)
3716 if (j >= NVRAM_TIMEOUT_COUNT)
3723 bnx2_disable_nvram_write(struct bnx2 *bp)
3727 val = REG_RD(bp, BNX2_MISC_CFG);
3728 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3733 bnx2_enable_nvram_access(struct bnx2 *bp)
3737 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3738 /* Enable both bits, even on read. */
3739 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3740 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3744 bnx2_disable_nvram_access(struct bnx2 *bp)
3748 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3749 /* Disable both bits, even after read. */
3750 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3751 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3752 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3756 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3761 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3762 /* Buffered flash, no erase needed */
3765 /* Build an erase command */
3766 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3767 BNX2_NVM_COMMAND_DOIT;
3769 /* Need to clear DONE bit separately. */
3770 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3772 /* Address of the NVRAM to read from. */
3773 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3775 /* Issue an erase command. */
3776 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3778 /* Wait for completion. */
3779 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3784 val = REG_RD(bp, BNX2_NVM_COMMAND);
3785 if (val & BNX2_NVM_COMMAND_DONE)
3789 if (j >= NVRAM_TIMEOUT_COUNT)
3796 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3801 /* Build the command word. */
3802 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3804 /* Calculate an offset of a buffered flash, not needed for 5709. */
3805 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3806 offset = ((offset / bp->flash_info->page_size) <<
3807 bp->flash_info->page_bits) +
3808 (offset % bp->flash_info->page_size);
3811 /* Need to clear DONE bit separately. */
3812 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3814 /* Address of the NVRAM to read from. */
3815 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3817 /* Issue a read command. */
3818 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3820 /* Wait for completion. */
3821 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3826 val = REG_RD(bp, BNX2_NVM_COMMAND);
3827 if (val & BNX2_NVM_COMMAND_DONE) {
3828 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3829 memcpy(ret_val, &v, 4);
3833 if (j >= NVRAM_TIMEOUT_COUNT)
3841 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3847 /* Build the command word. */
3848 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3850 /* Calculate an offset of a buffered flash, not needed for 5709. */
3851 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3852 offset = ((offset / bp->flash_info->page_size) <<
3853 bp->flash_info->page_bits) +
3854 (offset % bp->flash_info->page_size);
3857 /* Need to clear DONE bit separately. */
3858 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3860 memcpy(&val32, val, 4);
3862 /* Write the data. */
3863 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3865 /* Address of the NVRAM to write to. */
3866 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3868 /* Issue the write command. */
3869 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3871 /* Wait for completion. */
3872 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3875 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3878 if (j >= NVRAM_TIMEOUT_COUNT)
3885 bnx2_init_nvram(struct bnx2 *bp)
3888 int j, entry_count, rc = 0;
3889 struct flash_spec *flash;
3891 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3892 bp->flash_info = &flash_5709;
3893 goto get_flash_size;
3896 /* Determine the selected interface. */
3897 val = REG_RD(bp, BNX2_NVM_CFG1);
3899 entry_count = ARRAY_SIZE(flash_table);
3901 if (val & 0x40000000) {
3903 /* Flash interface has been reconfigured */
3904 for (j = 0, flash = &flash_table[0]; j < entry_count;
3906 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3907 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3908 bp->flash_info = flash;
3915 /* Not yet been reconfigured */
3917 if (val & (1 << 23))
3918 mask = FLASH_BACKUP_STRAP_MASK;
3920 mask = FLASH_STRAP_MASK;
3922 for (j = 0, flash = &flash_table[0]; j < entry_count;
3925 if ((val & mask) == (flash->strapping & mask)) {
3926 bp->flash_info = flash;
3928 /* Request access to the flash interface. */
3929 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3932 /* Enable access to flash interface */
3933 bnx2_enable_nvram_access(bp);
3935 /* Reconfigure the flash interface */
3936 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3937 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3938 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3939 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3941 /* Disable access to flash interface */
3942 bnx2_disable_nvram_access(bp);
3943 bnx2_release_nvram_lock(bp);
3948 } /* if (val & 0x40000000) */
3950 if (j == entry_count) {
3951 bp->flash_info = NULL;
3952 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3957 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3958 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3960 bp->flash_size = val;
3962 bp->flash_size = bp->flash_info->total_size;
3968 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3972 u32 cmd_flags, offset32, len32, extra;
3977 /* Request access to the flash interface. */
3978 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3981 /* Enable access to flash interface */
3982 bnx2_enable_nvram_access(bp);
3995 pre_len = 4 - (offset & 3);
3997 if (pre_len >= len32) {
3999 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4000 BNX2_NVM_COMMAND_LAST;
4003 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4006 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4011 memcpy(ret_buf, buf + (offset & 3), pre_len);
4018 extra = 4 - (len32 & 3);
4019 len32 = (len32 + 4) & ~3;
4026 cmd_flags = BNX2_NVM_COMMAND_LAST;
4028 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4029 BNX2_NVM_COMMAND_LAST;
4031 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4033 memcpy(ret_buf, buf, 4 - extra);
4035 else if (len32 > 0) {
4038 /* Read the first word. */
4042 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4044 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4046 /* Advance to the next dword. */
4051 while (len32 > 4 && rc == 0) {
4052 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4054 /* Advance to the next dword. */
4063 cmd_flags = BNX2_NVM_COMMAND_LAST;
4064 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4066 memcpy(ret_buf, buf, 4 - extra);
4069 /* Disable access to flash interface */
4070 bnx2_disable_nvram_access(bp);
4072 bnx2_release_nvram_lock(bp);
4078 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4081 u32 written, offset32, len32;
4082 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4084 int align_start, align_end;
4089 align_start = align_end = 0;
4091 if ((align_start = (offset32 & 3))) {
4093 len32 += align_start;
4096 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4101 align_end = 4 - (len32 & 3);
4103 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4107 if (align_start || align_end) {
4108 align_buf = kmalloc(len32, GFP_KERNEL);
4109 if (align_buf == NULL)
4112 memcpy(align_buf, start, 4);
4115 memcpy(align_buf + len32 - 4, end, 4);
4117 memcpy(align_buf + align_start, data_buf, buf_size);
4121 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4122 flash_buffer = kmalloc(264, GFP_KERNEL);
4123 if (flash_buffer == NULL) {
4125 goto nvram_write_end;
4130 while ((written < len32) && (rc == 0)) {
4131 u32 page_start, page_end, data_start, data_end;
4132 u32 addr, cmd_flags;
4135 /* Find the page_start addr */
4136 page_start = offset32 + written;
4137 page_start -= (page_start % bp->flash_info->page_size);
4138 /* Find the page_end addr */
4139 page_end = page_start + bp->flash_info->page_size;
4140 /* Find the data_start addr */
4141 data_start = (written == 0) ? offset32 : page_start;
4142 /* Find the data_end addr */
4143 data_end = (page_end > offset32 + len32) ?
4144 (offset32 + len32) : page_end;
4146 /* Request access to the flash interface. */
4147 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4148 goto nvram_write_end;
4150 /* Enable access to flash interface */
4151 bnx2_enable_nvram_access(bp);
4153 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4154 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4157 /* Read the whole page into the buffer
4158 * (non-buffer flash only) */
4159 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4160 if (j == (bp->flash_info->page_size - 4)) {
4161 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4163 rc = bnx2_nvram_read_dword(bp,
4169 goto nvram_write_end;
4175 /* Enable writes to flash interface (unlock write-protect) */
4176 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4177 goto nvram_write_end;
4179 /* Loop to write back the buffer data from page_start to
4182 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4183 /* Erase the page */
4184 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4185 goto nvram_write_end;
4187 /* Re-enable the write again for the actual write */
4188 bnx2_enable_nvram_write(bp);
4190 for (addr = page_start; addr < data_start;
4191 addr += 4, i += 4) {
4193 rc = bnx2_nvram_write_dword(bp, addr,
4194 &flash_buffer[i], cmd_flags);
4197 goto nvram_write_end;
4203 /* Loop to write the new data from data_start to data_end */
4204 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4205 if ((addr == page_end - 4) ||
4206 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4207 (addr == data_end - 4))) {
4209 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4211 rc = bnx2_nvram_write_dword(bp, addr, buf,
4215 goto nvram_write_end;
4221 /* Loop to write back the buffer data from data_end
4223 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4224 for (addr = data_end; addr < page_end;
4225 addr += 4, i += 4) {
4227 if (addr == page_end-4) {
4228 cmd_flags = BNX2_NVM_COMMAND_LAST;
4230 rc = bnx2_nvram_write_dword(bp, addr,
4231 &flash_buffer[i], cmd_flags);
4234 goto nvram_write_end;
4240 /* Disable writes to flash interface (lock write-protect) */
4241 bnx2_disable_nvram_write(bp);
4243 /* Disable access to flash interface */
4244 bnx2_disable_nvram_access(bp);
4245 bnx2_release_nvram_lock(bp);
4247 /* Increment written */
4248 written += data_end - data_start;
4252 kfree(flash_buffer);
4258 bnx2_init_remote_phy(struct bnx2 *bp)
4262 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4263 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4266 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4267 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4270 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4271 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4273 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4274 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4275 bp->phy_port = PORT_FIBRE;
4277 bp->phy_port = PORT_TP;
4279 if (netif_running(bp->dev)) {
4282 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4283 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4284 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4290 bnx2_setup_msix_tbl(struct bnx2 *bp)
4292 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4294 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4295 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4299 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4305 /* Wait for the current PCI transaction to complete before
4306 * issuing a reset. */
4307 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4308 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4309 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4310 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4311 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4312 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4315 /* Wait for the firmware to tell us it is ok to issue a reset. */
4316 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4318 /* Deposit a driver reset signature so the firmware knows that
4319 * this is a soft reset. */
4320 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4321 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4323 /* Do a dummy read to force the chip to complete all current transaction
4324 * before we issue a reset. */
4325 val = REG_RD(bp, BNX2_MISC_ID);
4327 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4328 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4329 REG_RD(bp, BNX2_MISC_COMMAND);
4332 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4333 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4335 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4338 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4339 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4340 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4343 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4345 /* Reading back any register after chip reset will hang the
4346 * bus on 5706 A0 and A1. The msleep below provides plenty
4347 * of margin for write posting.
4349 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4350 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4353 /* Reset takes approximate 30 usec */
4354 for (i = 0; i < 10; i++) {
4355 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4356 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4357 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4362 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4363 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4364 printk(KERN_ERR PFX "Chip reset did not complete\n");
4369 /* Make sure byte swapping is properly configured. */
4370 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4371 if (val != 0x01020304) {
4372 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4376 /* Wait for the firmware to finish its initialization. */
4377 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4381 spin_lock_bh(&bp->phy_lock);
4382 old_port = bp->phy_port;
4383 bnx2_init_remote_phy(bp);
4384 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4385 old_port != bp->phy_port)
4386 bnx2_set_default_remote_link(bp);
4387 spin_unlock_bh(&bp->phy_lock);
4389 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4390 /* Adjust the voltage regular to two steps lower. The default
4391 * of this register is 0x0000000e. */
4392 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4394 /* Remove bad rbuf memory from the free pool. */
4395 rc = bnx2_alloc_bad_rbuf(bp);
4398 if (bp->flags & BNX2_FLAG_USING_MSIX)
4399 bnx2_setup_msix_tbl(bp);
4405 bnx2_init_chip(struct bnx2 *bp)
4410 /* Make sure the interrupt is not active. */
4411 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4413 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4414 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4416 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4418 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4419 DMA_READ_CHANS << 12 |
4420 DMA_WRITE_CHANS << 16;
4422 val |= (0x2 << 20) | (1 << 11);
4424 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4427 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4428 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4429 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4431 REG_WR(bp, BNX2_DMA_CONFIG, val);
4433 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4434 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4435 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4436 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4439 if (bp->flags & BNX2_FLAG_PCIX) {
4442 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4444 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4445 val16 & ~PCI_X_CMD_ERO);
4448 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4449 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4450 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4451 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4453 /* Initialize context mapping and zero out the quick contexts. The
4454 * context block must have already been enabled. */
4455 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4456 rc = bnx2_init_5709_context(bp);
4460 bnx2_init_context(bp);
4462 if ((rc = bnx2_init_cpus(bp)) != 0)
4465 bnx2_init_nvram(bp);
4467 bnx2_set_mac_addr(bp);
4469 val = REG_RD(bp, BNX2_MQ_CONFIG);
4470 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4471 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4472 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4473 val |= BNX2_MQ_CONFIG_HALT_DIS;
4475 REG_WR(bp, BNX2_MQ_CONFIG, val);
4477 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4478 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4479 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4481 val = (BCM_PAGE_BITS - 8) << 24;
4482 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4484 /* Configure page size. */
4485 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4486 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4487 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4488 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4490 val = bp->mac_addr[0] +
4491 (bp->mac_addr[1] << 8) +
4492 (bp->mac_addr[2] << 16) +
4494 (bp->mac_addr[4] << 8) +
4495 (bp->mac_addr[5] << 16);
4496 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4498 /* Program the MTU. Also include 4 bytes for CRC32. */
4499 val = bp->dev->mtu + ETH_HLEN + 4;
4500 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4501 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4502 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4504 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4505 bp->bnx2_napi[i].last_status_idx = 0;
4507 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4509 /* Set up how to generate a link change interrupt. */
4510 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4512 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4513 (u64) bp->status_blk_mapping & 0xffffffff);
4514 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4516 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4517 (u64) bp->stats_blk_mapping & 0xffffffff);
4518 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4519 (u64) bp->stats_blk_mapping >> 32);
4521 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4522 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4524 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4525 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4527 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4528 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4530 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4532 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4534 REG_WR(bp, BNX2_HC_COM_TICKS,
4535 (bp->com_ticks_int << 16) | bp->com_ticks);
4537 REG_WR(bp, BNX2_HC_CMD_TICKS,
4538 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4540 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4541 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4543 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4544 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4546 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4547 val = BNX2_HC_CONFIG_COLLECT_STATS;
4549 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4550 BNX2_HC_CONFIG_COLLECT_STATS;
4553 if (bp->irq_nvecs > 1) {
4554 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4555 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4557 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4560 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4561 val |= BNX2_HC_CONFIG_ONE_SHOT;
4563 REG_WR(bp, BNX2_HC_CONFIG, val);
4565 for (i = 1; i < bp->irq_nvecs; i++) {
4566 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4567 BNX2_HC_SB_CONFIG_1;
4570 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4571 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4572 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4574 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4575 (bp->tx_quick_cons_trip_int << 16) |
4576 bp->tx_quick_cons_trip);
4578 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4579 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4581 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4582 (bp->rx_quick_cons_trip_int << 16) |
4583 bp->rx_quick_cons_trip);
4585 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4586 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4589 /* Clear internal stats counters. */
4590 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4592 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4594 /* Initialize the receive filter. */
4595 bnx2_set_rx_mode(bp->dev);
4597 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4598 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4599 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4600 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4602 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4605 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4606 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4610 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4616 bnx2_clear_ring_states(struct bnx2 *bp)
4618 struct bnx2_napi *bnapi;
4619 struct bnx2_tx_ring_info *txr;
4620 struct bnx2_rx_ring_info *rxr;
4623 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4624 bnapi = &bp->bnx2_napi[i];
4625 txr = &bnapi->tx_ring;
4626 rxr = &bnapi->rx_ring;
4629 txr->hw_tx_cons = 0;
4630 rxr->rx_prod_bseq = 0;
4633 rxr->rx_pg_prod = 0;
4634 rxr->rx_pg_cons = 0;
4639 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4641 u32 val, offset0, offset1, offset2, offset3;
4642 u32 cid_addr = GET_CID_ADDR(cid);
4644 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4645 offset0 = BNX2_L2CTX_TYPE_XI;
4646 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4647 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4648 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4650 offset0 = BNX2_L2CTX_TYPE;
4651 offset1 = BNX2_L2CTX_CMD_TYPE;
4652 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4653 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4655 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4656 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4658 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4659 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4661 val = (u64) txr->tx_desc_mapping >> 32;
4662 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4664 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4665 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4669 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4673 struct bnx2_napi *bnapi;
4674 struct bnx2_tx_ring_info *txr;
4676 bnapi = &bp->bnx2_napi[ring_num];
4677 txr = &bnapi->tx_ring;
4682 cid = TX_TSS_CID + ring_num - 1;
4684 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4686 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4688 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4689 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4692 txr->tx_prod_bseq = 0;
4694 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4695 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4697 bnx2_init_tx_context(bp, cid, txr);
4701 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4707 for (i = 0; i < num_rings; i++) {
4710 rxbd = &rx_ring[i][0];
4711 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4712 rxbd->rx_bd_len = buf_size;
4713 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4715 if (i == (num_rings - 1))
4719 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4720 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4725 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4728 u16 prod, ring_prod;
4729 u32 cid, rx_cid_addr, val;
4730 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4731 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4736 cid = RX_RSS_CID + ring_num - 1;
4738 rx_cid_addr = GET_CID_ADDR(cid);
4740 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4741 bp->rx_buf_use_size, bp->rx_max_ring);
4743 bnx2_init_rx_context(bp, cid);
4745 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4746 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4747 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4750 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4751 if (bp->rx_pg_ring_size) {
4752 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4753 rxr->rx_pg_desc_mapping,
4754 PAGE_SIZE, bp->rx_max_pg_ring);
4755 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4756 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4757 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4758 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4760 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4761 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4763 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4764 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4766 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4767 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4770 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4771 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4773 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4774 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4776 ring_prod = prod = rxr->rx_pg_prod;
4777 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4778 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4780 prod = NEXT_RX_BD(prod);
4781 ring_prod = RX_PG_RING_IDX(prod);
4783 rxr->rx_pg_prod = prod;
4785 ring_prod = prod = rxr->rx_prod;
4786 for (i = 0; i < bp->rx_ring_size; i++) {
4787 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4789 prod = NEXT_RX_BD(prod);
4790 ring_prod = RX_RING_IDX(prod);
4792 rxr->rx_prod = prod;
4794 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4795 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4796 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4798 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4799 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4801 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4805 bnx2_init_all_rings(struct bnx2 *bp)
4810 bnx2_clear_ring_states(bp);
4812 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4813 for (i = 0; i < bp->num_tx_rings; i++)
4814 bnx2_init_tx_ring(bp, i);
4816 if (bp->num_tx_rings > 1)
4817 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4820 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4821 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4823 for (i = 0; i < bp->num_rx_rings; i++)
4824 bnx2_init_rx_ring(bp, i);
4826 if (bp->num_rx_rings > 1) {
4828 u8 *tbl = (u8 *) &tbl_32;
4830 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4831 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
4833 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4834 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4837 BNX2_RXP_SCRATCH_RSS_TBL + i,
4838 cpu_to_be32(tbl_32));
4841 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4842 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4844 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
4849 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4851 u32 max, num_rings = 1;
4853 while (ring_size > MAX_RX_DESC_CNT) {
4854 ring_size -= MAX_RX_DESC_CNT;
4857 /* round to next power of 2 */
4859 while ((max & num_rings) == 0)
4862 if (num_rings != max)
4869 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4871 u32 rx_size, rx_space, jumbo_size;
4873 /* 8 for CRC and VLAN */
4874 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4876 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4877 sizeof(struct skb_shared_info);
4879 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4880 bp->rx_pg_ring_size = 0;
4881 bp->rx_max_pg_ring = 0;
4882 bp->rx_max_pg_ring_idx = 0;
4883 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4884 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4886 jumbo_size = size * pages;
4887 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4888 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4890 bp->rx_pg_ring_size = jumbo_size;
4891 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4893 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4894 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4895 bp->rx_copy_thresh = 0;
4898 bp->rx_buf_use_size = rx_size;
4900 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4901 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4902 bp->rx_ring_size = size;
4903 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4904 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4908 bnx2_free_tx_skbs(struct bnx2 *bp)
4912 for (i = 0; i < bp->num_tx_rings; i++) {
4913 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4914 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4917 if (txr->tx_buf_ring == NULL)
4920 for (j = 0; j < TX_DESC_CNT; ) {
4921 struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4922 struct sk_buff *skb = tx_buf->skb;
4930 pci_unmap_single(bp->pdev,
4931 pci_unmap_addr(tx_buf, mapping),
4932 skb_headlen(skb), PCI_DMA_TODEVICE);
4936 last = skb_shinfo(skb)->nr_frags;
4937 for (k = 0; k < last; k++) {
4938 tx_buf = &txr->tx_buf_ring[j + k + 1];
4939 pci_unmap_page(bp->pdev,
4940 pci_unmap_addr(tx_buf, mapping),
4941 skb_shinfo(skb)->frags[j].size,
4951 bnx2_free_rx_skbs(struct bnx2 *bp)
4955 for (i = 0; i < bp->num_rx_rings; i++) {
4956 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4957 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4960 if (rxr->rx_buf_ring == NULL)
4963 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4964 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4965 struct sk_buff *skb = rx_buf->skb;
4970 pci_unmap_single(bp->pdev,
4971 pci_unmap_addr(rx_buf, mapping),
4972 bp->rx_buf_use_size,
4973 PCI_DMA_FROMDEVICE);
4979 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4980 bnx2_free_rx_page(bp, rxr, j);
4985 bnx2_free_skbs(struct bnx2 *bp)
4987 bnx2_free_tx_skbs(bp);
4988 bnx2_free_rx_skbs(bp);
4992 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4996 rc = bnx2_reset_chip(bp, reset_code);
5001 if ((rc = bnx2_init_chip(bp)) != 0)
5004 bnx2_init_all_rings(bp);
5009 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5013 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5016 spin_lock_bh(&bp->phy_lock);
5017 bnx2_init_phy(bp, reset_phy);
5019 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5020 bnx2_remote_phy_event(bp);
5021 spin_unlock_bh(&bp->phy_lock);
5026 bnx2_test_registers(struct bnx2 *bp)
5030 static const struct {
5033 #define BNX2_FL_NOT_5709 1
5037 { 0x006c, 0, 0x00000000, 0x0000003f },
5038 { 0x0090, 0, 0xffffffff, 0x00000000 },
5039 { 0x0094, 0, 0x00000000, 0x00000000 },
5041 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5042 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5043 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5044 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5045 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5046 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5047 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5048 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5049 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5051 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5052 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5053 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5054 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5055 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5056 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5058 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5059 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5060 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5062 { 0x1000, 0, 0x00000000, 0x00000001 },
5063 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5065 { 0x1408, 0, 0x01c00800, 0x00000000 },
5066 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5067 { 0x14a8, 0, 0x00000000, 0x000001ff },
5068 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5069 { 0x14b0, 0, 0x00000002, 0x00000001 },
5070 { 0x14b8, 0, 0x00000000, 0x00000000 },
5071 { 0x14c0, 0, 0x00000000, 0x00000009 },
5072 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5073 { 0x14cc, 0, 0x00000000, 0x00000001 },
5074 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5076 { 0x1800, 0, 0x00000000, 0x00000001 },
5077 { 0x1804, 0, 0x00000000, 0x00000003 },
5079 { 0x2800, 0, 0x00000000, 0x00000001 },
5080 { 0x2804, 0, 0x00000000, 0x00003f01 },
5081 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5082 { 0x2810, 0, 0xffff0000, 0x00000000 },
5083 { 0x2814, 0, 0xffff0000, 0x00000000 },
5084 { 0x2818, 0, 0xffff0000, 0x00000000 },
5085 { 0x281c, 0, 0xffff0000, 0x00000000 },
5086 { 0x2834, 0, 0xffffffff, 0x00000000 },
5087 { 0x2840, 0, 0x00000000, 0xffffffff },
5088 { 0x2844, 0, 0x00000000, 0xffffffff },
5089 { 0x2848, 0, 0xffffffff, 0x00000000 },
5090 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5092 { 0x2c00, 0, 0x00000000, 0x00000011 },
5093 { 0x2c04, 0, 0x00000000, 0x00030007 },
5095 { 0x3c00, 0, 0x00000000, 0x00000001 },
5096 { 0x3c04, 0, 0x00000000, 0x00070000 },
5097 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5098 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5099 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5100 { 0x3c14, 0, 0x00000000, 0xffffffff },
5101 { 0x3c18, 0, 0x00000000, 0xffffffff },
5102 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5103 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5105 { 0x5004, 0, 0x00000000, 0x0000007f },
5106 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5108 { 0x5c00, 0, 0x00000000, 0x00000001 },
5109 { 0x5c04, 0, 0x00000000, 0x0003000f },
5110 { 0x5c08, 0, 0x00000003, 0x00000000 },
5111 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5112 { 0x5c10, 0, 0x00000000, 0xffffffff },
5113 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5114 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5115 { 0x5c88, 0, 0x00000000, 0x00077373 },
5116 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5118 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5119 { 0x680c, 0, 0xffffffff, 0x00000000 },
5120 { 0x6810, 0, 0xffffffff, 0x00000000 },
5121 { 0x6814, 0, 0xffffffff, 0x00000000 },
5122 { 0x6818, 0, 0xffffffff, 0x00000000 },
5123 { 0x681c, 0, 0xffffffff, 0x00000000 },
5124 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5125 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5126 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5127 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5128 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5129 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5130 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5131 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5132 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5133 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5134 { 0x684c, 0, 0xffffffff, 0x00000000 },
5135 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5136 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5137 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5138 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5139 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5140 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5142 { 0xffff, 0, 0x00000000, 0x00000000 },
5147 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5150 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5151 u32 offset, rw_mask, ro_mask, save_val, val;
5152 u16 flags = reg_tbl[i].flags;
5154 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5157 offset = (u32) reg_tbl[i].offset;
5158 rw_mask = reg_tbl[i].rw_mask;
5159 ro_mask = reg_tbl[i].ro_mask;
5161 save_val = readl(bp->regview + offset);
5163 writel(0, bp->regview + offset);
5165 val = readl(bp->regview + offset);
5166 if ((val & rw_mask) != 0) {
5170 if ((val & ro_mask) != (save_val & ro_mask)) {
5174 writel(0xffffffff, bp->regview + offset);
5176 val = readl(bp->regview + offset);
5177 if ((val & rw_mask) != rw_mask) {
5181 if ((val & ro_mask) != (save_val & ro_mask)) {
5185 writel(save_val, bp->regview + offset);
5189 writel(save_val, bp->regview + offset);
5197 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5199 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5200 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5203 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5206 for (offset = 0; offset < size; offset += 4) {
5208 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5210 if (bnx2_reg_rd_ind(bp, start + offset) !=
5220 bnx2_test_memory(struct bnx2 *bp)
5224 static struct mem_entry {
5227 } mem_tbl_5706[] = {
5228 { 0x60000, 0x4000 },
5229 { 0xa0000, 0x3000 },
5230 { 0xe0000, 0x4000 },
5231 { 0x120000, 0x4000 },
5232 { 0x1a0000, 0x4000 },
5233 { 0x160000, 0x4000 },
5237 { 0x60000, 0x4000 },
5238 { 0xa0000, 0x3000 },
5239 { 0xe0000, 0x4000 },
5240 { 0x120000, 0x4000 },
5241 { 0x1a0000, 0x4000 },
5244 struct mem_entry *mem_tbl;
5246 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5247 mem_tbl = mem_tbl_5709;
5249 mem_tbl = mem_tbl_5706;
5251 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5252 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5253 mem_tbl[i].len)) != 0) {
5261 #define BNX2_MAC_LOOPBACK 0
5262 #define BNX2_PHY_LOOPBACK 1
5265 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5267 unsigned int pkt_size, num_pkts, i;
5268 struct sk_buff *skb, *rx_skb;
5269 unsigned char *packet;
5270 u16 rx_start_idx, rx_idx;
5273 struct sw_bd *rx_buf;
5274 struct l2_fhdr *rx_hdr;
5276 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5277 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5278 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5282 txr = &tx_napi->tx_ring;
5283 rxr = &bnapi->rx_ring;
5284 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5285 bp->loopback = MAC_LOOPBACK;
5286 bnx2_set_mac_loopback(bp);
5288 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5289 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5292 bp->loopback = PHY_LOOPBACK;
5293 bnx2_set_phy_loopback(bp);
5298 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5299 skb = netdev_alloc_skb(bp->dev, pkt_size);
5302 packet = skb_put(skb, pkt_size);
5303 memcpy(packet, bp->dev->dev_addr, 6);
5304 memset(packet + 6, 0x0, 8);
5305 for (i = 14; i < pkt_size; i++)
5306 packet[i] = (unsigned char) (i & 0xff);
5308 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5311 REG_WR(bp, BNX2_HC_COMMAND,
5312 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5314 REG_RD(bp, BNX2_HC_COMMAND);
5317 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5321 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5323 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5324 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5325 txbd->tx_bd_mss_nbytes = pkt_size;
5326 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5329 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5330 txr->tx_prod_bseq += pkt_size;
5332 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5333 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5337 REG_WR(bp, BNX2_HC_COMMAND,
5338 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5340 REG_RD(bp, BNX2_HC_COMMAND);
5344 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5347 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5348 goto loopback_test_done;
5350 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5351 if (rx_idx != rx_start_idx + num_pkts) {
5352 goto loopback_test_done;
5355 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5356 rx_skb = rx_buf->skb;
5358 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5359 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5361 pci_dma_sync_single_for_cpu(bp->pdev,
5362 pci_unmap_addr(rx_buf, mapping),
5363 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5365 if (rx_hdr->l2_fhdr_status &
5366 (L2_FHDR_ERRORS_BAD_CRC |
5367 L2_FHDR_ERRORS_PHY_DECODE |
5368 L2_FHDR_ERRORS_ALIGNMENT |
5369 L2_FHDR_ERRORS_TOO_SHORT |
5370 L2_FHDR_ERRORS_GIANT_FRAME)) {
5372 goto loopback_test_done;
5375 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5376 goto loopback_test_done;
5379 for (i = 14; i < pkt_size; i++) {
5380 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5381 goto loopback_test_done;
5392 #define BNX2_MAC_LOOPBACK_FAILED 1
5393 #define BNX2_PHY_LOOPBACK_FAILED 2
5394 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5395 BNX2_PHY_LOOPBACK_FAILED)
5398 bnx2_test_loopback(struct bnx2 *bp)
5402 if (!netif_running(bp->dev))
5403 return BNX2_LOOPBACK_FAILED;
5405 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5406 spin_lock_bh(&bp->phy_lock);
5407 bnx2_init_phy(bp, 1);
5408 spin_unlock_bh(&bp->phy_lock);
5409 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5410 rc |= BNX2_MAC_LOOPBACK_FAILED;
5411 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5412 rc |= BNX2_PHY_LOOPBACK_FAILED;
5416 #define NVRAM_SIZE 0x200
5417 #define CRC32_RESIDUAL 0xdebb20e3
5420 bnx2_test_nvram(struct bnx2 *bp)
5422 __be32 buf[NVRAM_SIZE / 4];
5423 u8 *data = (u8 *) buf;
5427 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5428 goto test_nvram_done;
5430 magic = be32_to_cpu(buf[0]);
5431 if (magic != 0x669955aa) {
5433 goto test_nvram_done;
5436 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5437 goto test_nvram_done;
5439 csum = ether_crc_le(0x100, data);
5440 if (csum != CRC32_RESIDUAL) {
5442 goto test_nvram_done;
5445 csum = ether_crc_le(0x100, data + 0x100);
5446 if (csum != CRC32_RESIDUAL) {
5455 bnx2_test_link(struct bnx2 *bp)
5459 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5464 spin_lock_bh(&bp->phy_lock);
5465 bnx2_enable_bmsr1(bp);
5466 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5467 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5468 bnx2_disable_bmsr1(bp);
5469 spin_unlock_bh(&bp->phy_lock);
5471 if (bmsr & BMSR_LSTATUS) {
5478 bnx2_test_intr(struct bnx2 *bp)
5483 if (!netif_running(bp->dev))
5486 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5488 /* This register is not touched during run-time. */
5489 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5490 REG_RD(bp, BNX2_HC_COMMAND);
5492 for (i = 0; i < 10; i++) {
5493 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5499 msleep_interruptible(10);
5507 /* Determining link for parallel detection. */
5509 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5511 u32 mode_ctl, an_dbg, exp;
5513 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5516 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5517 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5519 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5522 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5523 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5524 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5526 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5529 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5530 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5531 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5533 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5540 bnx2_5706_serdes_timer(struct bnx2 *bp)
5544 spin_lock(&bp->phy_lock);
5545 if (bp->serdes_an_pending) {
5546 bp->serdes_an_pending--;
5548 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5551 bp->current_interval = bp->timer_interval;
5553 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5555 if (bmcr & BMCR_ANENABLE) {
5556 if (bnx2_5706_serdes_has_link(bp)) {
5557 bmcr &= ~BMCR_ANENABLE;
5558 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5559 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5560 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5564 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5565 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5568 bnx2_write_phy(bp, 0x17, 0x0f01);
5569 bnx2_read_phy(bp, 0x15, &phy2);
5573 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5574 bmcr |= BMCR_ANENABLE;
5575 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5577 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5580 bp->current_interval = bp->timer_interval;
5585 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5586 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5587 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5589 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5590 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5591 bnx2_5706s_force_link_dn(bp, 1);
5592 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5595 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5598 spin_unlock(&bp->phy_lock);
5602 bnx2_5708_serdes_timer(struct bnx2 *bp)
5604 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5607 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5608 bp->serdes_an_pending = 0;
5612 spin_lock(&bp->phy_lock);
5613 if (bp->serdes_an_pending)
5614 bp->serdes_an_pending--;
5615 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5618 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5619 if (bmcr & BMCR_ANENABLE) {
5620 bnx2_enable_forced_2g5(bp);
5621 bp->current_interval = SERDES_FORCED_TIMEOUT;
5623 bnx2_disable_forced_2g5(bp);
5624 bp->serdes_an_pending = 2;
5625 bp->current_interval = bp->timer_interval;
5629 bp->current_interval = bp->timer_interval;
5631 spin_unlock(&bp->phy_lock);
5635 bnx2_timer(unsigned long data)
5637 struct bnx2 *bp = (struct bnx2 *) data;
5639 if (!netif_running(bp->dev))
5642 if (atomic_read(&bp->intr_sem) != 0)
5643 goto bnx2_restart_timer;
5645 bnx2_send_heart_beat(bp);
5647 bp->stats_blk->stat_FwRxDrop =
5648 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5650 /* workaround occasional corrupted counters */
5651 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5652 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5653 BNX2_HC_COMMAND_STATS_NOW);
5655 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5656 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5657 bnx2_5706_serdes_timer(bp);
5659 bnx2_5708_serdes_timer(bp);
5663 mod_timer(&bp->timer, jiffies + bp->current_interval);
5667 bnx2_request_irq(struct bnx2 *bp)
5669 unsigned long flags;
5670 struct bnx2_irq *irq;
5673 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5676 flags = IRQF_SHARED;
5678 for (i = 0; i < bp->irq_nvecs; i++) {
5679 irq = &bp->irq_tbl[i];
5680 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5690 bnx2_free_irq(struct bnx2 *bp)
5692 struct bnx2_irq *irq;
5695 for (i = 0; i < bp->irq_nvecs; i++) {
5696 irq = &bp->irq_tbl[i];
5698 free_irq(irq->vector, &bp->bnx2_napi[i]);
5701 if (bp->flags & BNX2_FLAG_USING_MSI)
5702 pci_disable_msi(bp->pdev);
5703 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5704 pci_disable_msix(bp->pdev);
5706 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5710 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5713 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5715 bnx2_setup_msix_tbl(bp);
5716 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5717 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5718 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5720 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5721 msix_ent[i].entry = i;
5722 msix_ent[i].vector = 0;
5724 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5725 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5728 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5732 bp->irq_nvecs = msix_vecs;
5733 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5734 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5735 bp->irq_tbl[i].vector = msix_ent[i].vector;
5739 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5741 int cpus = num_online_cpus();
5742 int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5744 bp->irq_tbl[0].handler = bnx2_interrupt;
5745 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5747 bp->irq_tbl[0].vector = bp->pdev->irq;
5749 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5750 bnx2_enable_msix(bp, msix_vecs);
5752 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5753 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5754 if (pci_enable_msi(bp->pdev) == 0) {
5755 bp->flags |= BNX2_FLAG_USING_MSI;
5756 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5757 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5758 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5760 bp->irq_tbl[0].handler = bnx2_msi;
5762 bp->irq_tbl[0].vector = bp->pdev->irq;
5765 bp->num_tx_rings = 1;
5766 bp->num_rx_rings = bp->irq_nvecs;
5769 /* Called with rtnl_lock */
5771 bnx2_open(struct net_device *dev)
5773 struct bnx2 *bp = netdev_priv(dev);
5776 netif_carrier_off(dev);
5778 bnx2_set_power_state(bp, PCI_D0);
5779 bnx2_disable_int(bp);
5781 bnx2_setup_int_mode(bp, disable_msi);
5782 bnx2_napi_enable(bp);
5783 rc = bnx2_alloc_mem(bp);
5787 rc = bnx2_request_irq(bp);
5791 rc = bnx2_init_nic(bp, 1);
5795 mod_timer(&bp->timer, jiffies + bp->current_interval);
5797 atomic_set(&bp->intr_sem, 0);
5799 bnx2_enable_int(bp);
5801 if (bp->flags & BNX2_FLAG_USING_MSI) {
5802 /* Test MSI to make sure it is working
5803 * If MSI test fails, go back to INTx mode
5805 if (bnx2_test_intr(bp) != 0) {
5806 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5807 " using MSI, switching to INTx mode. Please"
5808 " report this failure to the PCI maintainer"
5809 " and include system chipset information.\n",
5812 bnx2_disable_int(bp);
5815 bnx2_setup_int_mode(bp, 1);
5817 rc = bnx2_init_nic(bp, 0);
5820 rc = bnx2_request_irq(bp);
5823 del_timer_sync(&bp->timer);
5826 bnx2_enable_int(bp);
5829 if (bp->flags & BNX2_FLAG_USING_MSI)
5830 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5831 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5832 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5834 netif_start_queue(dev);
5839 bnx2_napi_disable(bp);
5847 bnx2_reset_task(struct work_struct *work)
5849 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5851 if (!netif_running(bp->dev))
5854 bnx2_netif_stop(bp);
5856 bnx2_init_nic(bp, 1);
5858 atomic_set(&bp->intr_sem, 1);
5859 bnx2_netif_start(bp);
5863 bnx2_tx_timeout(struct net_device *dev)
5865 struct bnx2 *bp = netdev_priv(dev);
5867 /* This allows the netif to be shutdown gracefully before resetting */
5868 schedule_work(&bp->reset_task);
5872 /* Called with rtnl_lock */
5874 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5876 struct bnx2 *bp = netdev_priv(dev);
5878 bnx2_netif_stop(bp);
5881 bnx2_set_rx_mode(dev);
5883 bnx2_netif_start(bp);
5887 /* Called with netif_tx_lock.
5888 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5889 * netif_wake_queue().
5892 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5894 struct bnx2 *bp = netdev_priv(dev);
5897 struct sw_bd *tx_buf;
5898 u32 len, vlan_tag_flags, last_frag, mss;
5899 u16 prod, ring_prod;
5901 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
5902 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5904 if (unlikely(bnx2_tx_avail(bp, txr) <
5905 (skb_shinfo(skb)->nr_frags + 1))) {
5906 netif_stop_queue(dev);
5907 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5910 return NETDEV_TX_BUSY;
5912 len = skb_headlen(skb);
5913 prod = txr->tx_prod;
5914 ring_prod = TX_RING_IDX(prod);
5917 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5918 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5921 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5923 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5925 if ((mss = skb_shinfo(skb)->gso_size)) {
5926 u32 tcp_opt_len, ip_tcp_len;
5929 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5931 tcp_opt_len = tcp_optlen(skb);
5933 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5934 u32 tcp_off = skb_transport_offset(skb) -
5935 sizeof(struct ipv6hdr) - ETH_HLEN;
5937 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5938 TX_BD_FLAGS_SW_FLAGS;
5939 if (likely(tcp_off == 0))
5940 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5943 vlan_tag_flags |= ((tcp_off & 0x3) <<
5944 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5945 ((tcp_off & 0x10) <<
5946 TX_BD_FLAGS_TCP6_OFF4_SHL);
5947 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5950 if (skb_header_cloned(skb) &&
5951 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5953 return NETDEV_TX_OK;
5956 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5960 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5961 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5965 if (tcp_opt_len || (iph->ihl > 5)) {
5966 vlan_tag_flags |= ((iph->ihl - 5) +
5967 (tcp_opt_len >> 2)) << 8;
5973 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5975 tx_buf = &txr->tx_buf_ring[ring_prod];
5977 pci_unmap_addr_set(tx_buf, mapping, mapping);
5979 txbd = &txr->tx_desc_ring[ring_prod];
5981 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5982 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5983 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5984 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5986 last_frag = skb_shinfo(skb)->nr_frags;
5988 for (i = 0; i < last_frag; i++) {
5989 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5991 prod = NEXT_TX_BD(prod);
5992 ring_prod = TX_RING_IDX(prod);
5993 txbd = &txr->tx_desc_ring[ring_prod];
5996 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5997 len, PCI_DMA_TODEVICE);
5998 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
6001 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6002 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6003 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6004 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6007 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6009 prod = NEXT_TX_BD(prod);
6010 txr->tx_prod_bseq += skb->len;
6012 REG_WR16(bp, txr->tx_bidx_addr, prod);
6013 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6017 txr->tx_prod = prod;
6018 dev->trans_start = jiffies;
6020 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6021 netif_stop_queue(dev);
6022 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6023 netif_wake_queue(dev);
6026 return NETDEV_TX_OK;
6029 /* Called with rtnl_lock */
6031 bnx2_close(struct net_device *dev)
6033 struct bnx2 *bp = netdev_priv(dev);
6036 cancel_work_sync(&bp->reset_task);
6038 bnx2_disable_int_sync(bp);
6039 bnx2_napi_disable(bp);
6040 del_timer_sync(&bp->timer);
6041 if (bp->flags & BNX2_FLAG_NO_WOL)
6042 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6044 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6046 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6047 bnx2_reset_chip(bp, reset_code);
6052 netif_carrier_off(bp->dev);
6053 bnx2_set_power_state(bp, PCI_D3hot);
6057 #define GET_NET_STATS64(ctr) \
6058 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6059 (unsigned long) (ctr##_lo)
6061 #define GET_NET_STATS32(ctr) \
6064 #if (BITS_PER_LONG == 64)
6065 #define GET_NET_STATS GET_NET_STATS64
6067 #define GET_NET_STATS GET_NET_STATS32
6070 static struct net_device_stats *
6071 bnx2_get_stats(struct net_device *dev)
6073 struct bnx2 *bp = netdev_priv(dev);
6074 struct statistics_block *stats_blk = bp->stats_blk;
6075 struct net_device_stats *net_stats = &bp->net_stats;
6077 if (bp->stats_blk == NULL) {
6080 net_stats->rx_packets =
6081 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6082 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6083 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6085 net_stats->tx_packets =
6086 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6087 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6088 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6090 net_stats->rx_bytes =
6091 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6093 net_stats->tx_bytes =
6094 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6096 net_stats->multicast =
6097 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6099 net_stats->collisions =
6100 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6102 net_stats->rx_length_errors =
6103 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6104 stats_blk->stat_EtherStatsOverrsizePkts);
6106 net_stats->rx_over_errors =
6107 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6109 net_stats->rx_frame_errors =
6110 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6112 net_stats->rx_crc_errors =
6113 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6115 net_stats->rx_errors = net_stats->rx_length_errors +
6116 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6117 net_stats->rx_crc_errors;
6119 net_stats->tx_aborted_errors =
6120 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6121 stats_blk->stat_Dot3StatsLateCollisions);
6123 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6124 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6125 net_stats->tx_carrier_errors = 0;
6127 net_stats->tx_carrier_errors =
6129 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6132 net_stats->tx_errors =
6134 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6136 net_stats->tx_aborted_errors +
6137 net_stats->tx_carrier_errors;
6139 net_stats->rx_missed_errors =
6140 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6141 stats_blk->stat_FwRxDrop);
6146 /* All ethtool functions called with rtnl_lock */
6149 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6151 struct bnx2 *bp = netdev_priv(dev);
6152 int support_serdes = 0, support_copper = 0;
6154 cmd->supported = SUPPORTED_Autoneg;
6155 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6158 } else if (bp->phy_port == PORT_FIBRE)
6163 if (support_serdes) {
6164 cmd->supported |= SUPPORTED_1000baseT_Full |
6166 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6167 cmd->supported |= SUPPORTED_2500baseX_Full;
6170 if (support_copper) {
6171 cmd->supported |= SUPPORTED_10baseT_Half |
6172 SUPPORTED_10baseT_Full |
6173 SUPPORTED_100baseT_Half |
6174 SUPPORTED_100baseT_Full |
6175 SUPPORTED_1000baseT_Full |
6180 spin_lock_bh(&bp->phy_lock);
6181 cmd->port = bp->phy_port;
6182 cmd->advertising = bp->advertising;
6184 if (bp->autoneg & AUTONEG_SPEED) {
6185 cmd->autoneg = AUTONEG_ENABLE;
6188 cmd->autoneg = AUTONEG_DISABLE;
6191 if (netif_carrier_ok(dev)) {
6192 cmd->speed = bp->line_speed;
6193 cmd->duplex = bp->duplex;
6199 spin_unlock_bh(&bp->phy_lock);
6201 cmd->transceiver = XCVR_INTERNAL;
6202 cmd->phy_address = bp->phy_addr;
6208 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6210 struct bnx2 *bp = netdev_priv(dev);
6211 u8 autoneg = bp->autoneg;
6212 u8 req_duplex = bp->req_duplex;
6213 u16 req_line_speed = bp->req_line_speed;
6214 u32 advertising = bp->advertising;
6217 spin_lock_bh(&bp->phy_lock);
6219 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6220 goto err_out_unlock;
6222 if (cmd->port != bp->phy_port &&
6223 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6224 goto err_out_unlock;
6226 /* If device is down, we can store the settings only if the user
6227 * is setting the currently active port.
6229 if (!netif_running(dev) && cmd->port != bp->phy_port)
6230 goto err_out_unlock;
6232 if (cmd->autoneg == AUTONEG_ENABLE) {
6233 autoneg |= AUTONEG_SPEED;
6235 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6237 /* allow advertising 1 speed */
6238 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6239 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6240 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6241 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6243 if (cmd->port == PORT_FIBRE)
6244 goto err_out_unlock;
6246 advertising = cmd->advertising;
6248 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6249 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6250 (cmd->port == PORT_TP))
6251 goto err_out_unlock;
6252 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6253 advertising = cmd->advertising;
6254 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6255 goto err_out_unlock;
6257 if (cmd->port == PORT_FIBRE)
6258 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6260 advertising = ETHTOOL_ALL_COPPER_SPEED;
6262 advertising |= ADVERTISED_Autoneg;
6265 if (cmd->port == PORT_FIBRE) {
6266 if ((cmd->speed != SPEED_1000 &&
6267 cmd->speed != SPEED_2500) ||
6268 (cmd->duplex != DUPLEX_FULL))
6269 goto err_out_unlock;
6271 if (cmd->speed == SPEED_2500 &&
6272 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6273 goto err_out_unlock;
6275 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6276 goto err_out_unlock;
6278 autoneg &= ~AUTONEG_SPEED;
6279 req_line_speed = cmd->speed;
6280 req_duplex = cmd->duplex;
6284 bp->autoneg = autoneg;
6285 bp->advertising = advertising;
6286 bp->req_line_speed = req_line_speed;
6287 bp->req_duplex = req_duplex;
6290 /* If device is down, the new settings will be picked up when it is
6293 if (netif_running(dev))
6294 err = bnx2_setup_phy(bp, cmd->port);
6297 spin_unlock_bh(&bp->phy_lock);
6303 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6305 struct bnx2 *bp = netdev_priv(dev);
6307 strcpy(info->driver, DRV_MODULE_NAME);
6308 strcpy(info->version, DRV_MODULE_VERSION);
6309 strcpy(info->bus_info, pci_name(bp->pdev));
6310 strcpy(info->fw_version, bp->fw_version);
6313 #define BNX2_REGDUMP_LEN (32 * 1024)
6316 bnx2_get_regs_len(struct net_device *dev)
6318 return BNX2_REGDUMP_LEN;
6322 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6324 u32 *p = _p, i, offset;
6326 struct bnx2 *bp = netdev_priv(dev);
6327 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6328 0x0800, 0x0880, 0x0c00, 0x0c10,
6329 0x0c30, 0x0d08, 0x1000, 0x101c,
6330 0x1040, 0x1048, 0x1080, 0x10a4,
6331 0x1400, 0x1490, 0x1498, 0x14f0,
6332 0x1500, 0x155c, 0x1580, 0x15dc,
6333 0x1600, 0x1658, 0x1680, 0x16d8,
6334 0x1800, 0x1820, 0x1840, 0x1854,
6335 0x1880, 0x1894, 0x1900, 0x1984,
6336 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6337 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6338 0x2000, 0x2030, 0x23c0, 0x2400,
6339 0x2800, 0x2820, 0x2830, 0x2850,
6340 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6341 0x3c00, 0x3c94, 0x4000, 0x4010,
6342 0x4080, 0x4090, 0x43c0, 0x4458,
6343 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6344 0x4fc0, 0x5010, 0x53c0, 0x5444,
6345 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6346 0x5fc0, 0x6000, 0x6400, 0x6428,
6347 0x6800, 0x6848, 0x684c, 0x6860,
6348 0x6888, 0x6910, 0x8000 };
6352 memset(p, 0, BNX2_REGDUMP_LEN);
6354 if (!netif_running(bp->dev))
6358 offset = reg_boundaries[0];
6360 while (offset < BNX2_REGDUMP_LEN) {
6361 *p++ = REG_RD(bp, offset);
6363 if (offset == reg_boundaries[i + 1]) {
6364 offset = reg_boundaries[i + 2];
6365 p = (u32 *) (orig_p + offset);
6372 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6374 struct bnx2 *bp = netdev_priv(dev);
6376 if (bp->flags & BNX2_FLAG_NO_WOL) {
6381 wol->supported = WAKE_MAGIC;
6383 wol->wolopts = WAKE_MAGIC;
6387 memset(&wol->sopass, 0, sizeof(wol->sopass));
6391 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6393 struct bnx2 *bp = netdev_priv(dev);
6395 if (wol->wolopts & ~WAKE_MAGIC)
6398 if (wol->wolopts & WAKE_MAGIC) {
6399 if (bp->flags & BNX2_FLAG_NO_WOL)
6411 bnx2_nway_reset(struct net_device *dev)
6413 struct bnx2 *bp = netdev_priv(dev);
6416 if (!(bp->autoneg & AUTONEG_SPEED)) {
6420 spin_lock_bh(&bp->phy_lock);
6422 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6425 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6426 spin_unlock_bh(&bp->phy_lock);
6430 /* Force a link down visible on the other side */
6431 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6432 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6433 spin_unlock_bh(&bp->phy_lock);
6437 spin_lock_bh(&bp->phy_lock);
6439 bp->current_interval = SERDES_AN_TIMEOUT;
6440 bp->serdes_an_pending = 1;
6441 mod_timer(&bp->timer, jiffies + bp->current_interval);
6444 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6445 bmcr &= ~BMCR_LOOPBACK;
6446 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6448 spin_unlock_bh(&bp->phy_lock);
6454 bnx2_get_eeprom_len(struct net_device *dev)
6456 struct bnx2 *bp = netdev_priv(dev);
6458 if (bp->flash_info == NULL)
6461 return (int) bp->flash_size;
6465 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6468 struct bnx2 *bp = netdev_priv(dev);
6471 /* parameters already validated in ethtool_get_eeprom */
6473 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6479 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6482 struct bnx2 *bp = netdev_priv(dev);
6485 /* parameters already validated in ethtool_set_eeprom */
6487 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6493 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6495 struct bnx2 *bp = netdev_priv(dev);
6497 memset(coal, 0, sizeof(struct ethtool_coalesce));
6499 coal->rx_coalesce_usecs = bp->rx_ticks;
6500 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6501 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6502 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6504 coal->tx_coalesce_usecs = bp->tx_ticks;
6505 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6506 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6507 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6509 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6515 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6517 struct bnx2 *bp = netdev_priv(dev);
6519 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6520 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6522 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6523 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6525 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6526 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6528 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6529 if (bp->rx_quick_cons_trip_int > 0xff)
6530 bp->rx_quick_cons_trip_int = 0xff;
6532 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6533 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6535 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6536 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6538 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6539 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6541 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6542 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6545 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6546 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6547 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6548 bp->stats_ticks = USEC_PER_SEC;
6550 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6551 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6552 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6554 if (netif_running(bp->dev)) {
6555 bnx2_netif_stop(bp);
6556 bnx2_init_nic(bp, 0);
6557 bnx2_netif_start(bp);
6564 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6566 struct bnx2 *bp = netdev_priv(dev);
6568 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6569 ering->rx_mini_max_pending = 0;
6570 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6572 ering->rx_pending = bp->rx_ring_size;
6573 ering->rx_mini_pending = 0;
6574 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6576 ering->tx_max_pending = MAX_TX_DESC_CNT;
6577 ering->tx_pending = bp->tx_ring_size;
6581 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6583 if (netif_running(bp->dev)) {
6584 bnx2_netif_stop(bp);
6585 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6590 bnx2_set_rx_ring_size(bp, rx);
6591 bp->tx_ring_size = tx;
6593 if (netif_running(bp->dev)) {
6596 rc = bnx2_alloc_mem(bp);
6599 bnx2_init_nic(bp, 0);
6600 bnx2_netif_start(bp);
6606 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6608 struct bnx2 *bp = netdev_priv(dev);
6611 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6612 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6613 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6617 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6622 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6624 struct bnx2 *bp = netdev_priv(dev);
6626 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6627 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6628 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6632 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6634 struct bnx2 *bp = netdev_priv(dev);
6636 bp->req_flow_ctrl = 0;
6637 if (epause->rx_pause)
6638 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6639 if (epause->tx_pause)
6640 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6642 if (epause->autoneg) {
6643 bp->autoneg |= AUTONEG_FLOW_CTRL;
6646 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6649 spin_lock_bh(&bp->phy_lock);
6651 bnx2_setup_phy(bp, bp->phy_port);
6653 spin_unlock_bh(&bp->phy_lock);
6659 bnx2_get_rx_csum(struct net_device *dev)
6661 struct bnx2 *bp = netdev_priv(dev);
6667 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6669 struct bnx2 *bp = netdev_priv(dev);
6676 bnx2_set_tso(struct net_device *dev, u32 data)
6678 struct bnx2 *bp = netdev_priv(dev);
6681 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6682 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6683 dev->features |= NETIF_F_TSO6;
6685 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6690 #define BNX2_NUM_STATS 46
6693 char string[ETH_GSTRING_LEN];
6694 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6696 { "rx_error_bytes" },
6698 { "tx_error_bytes" },
6699 { "rx_ucast_packets" },
6700 { "rx_mcast_packets" },
6701 { "rx_bcast_packets" },
6702 { "tx_ucast_packets" },
6703 { "tx_mcast_packets" },
6704 { "tx_bcast_packets" },
6705 { "tx_mac_errors" },
6706 { "tx_carrier_errors" },
6707 { "rx_crc_errors" },
6708 { "rx_align_errors" },
6709 { "tx_single_collisions" },
6710 { "tx_multi_collisions" },
6712 { "tx_excess_collisions" },
6713 { "tx_late_collisions" },
6714 { "tx_total_collisions" },
6717 { "rx_undersize_packets" },
6718 { "rx_oversize_packets" },
6719 { "rx_64_byte_packets" },
6720 { "rx_65_to_127_byte_packets" },
6721 { "rx_128_to_255_byte_packets" },
6722 { "rx_256_to_511_byte_packets" },
6723 { "rx_512_to_1023_byte_packets" },
6724 { "rx_1024_to_1522_byte_packets" },
6725 { "rx_1523_to_9022_byte_packets" },
6726 { "tx_64_byte_packets" },
6727 { "tx_65_to_127_byte_packets" },
6728 { "tx_128_to_255_byte_packets" },
6729 { "tx_256_to_511_byte_packets" },
6730 { "tx_512_to_1023_byte_packets" },
6731 { "tx_1024_to_1522_byte_packets" },
6732 { "tx_1523_to_9022_byte_packets" },
6733 { "rx_xon_frames" },
6734 { "rx_xoff_frames" },
6735 { "tx_xon_frames" },
6736 { "tx_xoff_frames" },
6737 { "rx_mac_ctrl_frames" },
6738 { "rx_filtered_packets" },
6740 { "rx_fw_discards" },
6743 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6745 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6746 STATS_OFFSET32(stat_IfHCInOctets_hi),
6747 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6748 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6749 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6750 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6751 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6752 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6753 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6754 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6755 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6756 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6757 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6758 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6759 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6760 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6761 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6762 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6763 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6764 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6765 STATS_OFFSET32(stat_EtherStatsCollisions),
6766 STATS_OFFSET32(stat_EtherStatsFragments),
6767 STATS_OFFSET32(stat_EtherStatsJabbers),
6768 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6769 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6770 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6771 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6772 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6773 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6774 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6775 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6776 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6777 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6778 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6779 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6780 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6781 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6782 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6783 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6784 STATS_OFFSET32(stat_XonPauseFramesReceived),
6785 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6786 STATS_OFFSET32(stat_OutXonSent),
6787 STATS_OFFSET32(stat_OutXoffSent),
6788 STATS_OFFSET32(stat_MacControlFramesReceived),
6789 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6790 STATS_OFFSET32(stat_IfInMBUFDiscards),
6791 STATS_OFFSET32(stat_FwRxDrop),
6794 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6795 * skipped because of errata.
6797 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6798 8,0,8,8,8,8,8,8,8,8,
6799 4,0,4,4,4,4,4,4,4,4,
6800 4,4,4,4,4,4,4,4,4,4,
6801 4,4,4,4,4,4,4,4,4,4,
6805 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6806 8,0,8,8,8,8,8,8,8,8,
6807 4,4,4,4,4,4,4,4,4,4,
6808 4,4,4,4,4,4,4,4,4,4,
6809 4,4,4,4,4,4,4,4,4,4,
6813 #define BNX2_NUM_TESTS 6
6816 char string[ETH_GSTRING_LEN];
6817 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6818 { "register_test (offline)" },
6819 { "memory_test (offline)" },
6820 { "loopback_test (offline)" },
6821 { "nvram_test (online)" },
6822 { "interrupt_test (online)" },
6823 { "link_test (online)" },
6827 bnx2_get_sset_count(struct net_device *dev, int sset)
6831 return BNX2_NUM_TESTS;
6833 return BNX2_NUM_STATS;
6840 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6842 struct bnx2 *bp = netdev_priv(dev);
6844 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6845 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6848 bnx2_netif_stop(bp);
6849 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6852 if (bnx2_test_registers(bp) != 0) {
6854 etest->flags |= ETH_TEST_FL_FAILED;
6856 if (bnx2_test_memory(bp) != 0) {
6858 etest->flags |= ETH_TEST_FL_FAILED;
6860 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6861 etest->flags |= ETH_TEST_FL_FAILED;
6863 if (!netif_running(bp->dev)) {
6864 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6867 bnx2_init_nic(bp, 1);
6868 bnx2_netif_start(bp);
6871 /* wait for link up */
6872 for (i = 0; i < 7; i++) {
6875 msleep_interruptible(1000);
6879 if (bnx2_test_nvram(bp) != 0) {
6881 etest->flags |= ETH_TEST_FL_FAILED;
6883 if (bnx2_test_intr(bp) != 0) {
6885 etest->flags |= ETH_TEST_FL_FAILED;
6888 if (bnx2_test_link(bp) != 0) {
6890 etest->flags |= ETH_TEST_FL_FAILED;
6896 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6898 switch (stringset) {
6900 memcpy(buf, bnx2_stats_str_arr,
6901 sizeof(bnx2_stats_str_arr));
6904 memcpy(buf, bnx2_tests_str_arr,
6905 sizeof(bnx2_tests_str_arr));
6911 bnx2_get_ethtool_stats(struct net_device *dev,
6912 struct ethtool_stats *stats, u64 *buf)
6914 struct bnx2 *bp = netdev_priv(dev);
6916 u32 *hw_stats = (u32 *) bp->stats_blk;
6917 u8 *stats_len_arr = NULL;
6919 if (hw_stats == NULL) {
6920 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6924 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6925 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6926 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6927 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6928 stats_len_arr = bnx2_5706_stats_len_arr;
6930 stats_len_arr = bnx2_5708_stats_len_arr;
6932 for (i = 0; i < BNX2_NUM_STATS; i++) {
6933 if (stats_len_arr[i] == 0) {
6934 /* skip this counter */
6938 if (stats_len_arr[i] == 4) {
6939 /* 4-byte counter */
6941 *(hw_stats + bnx2_stats_offset_arr[i]);
6944 /* 8-byte counter */
6945 buf[i] = (((u64) *(hw_stats +
6946 bnx2_stats_offset_arr[i])) << 32) +
6947 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6952 bnx2_phys_id(struct net_device *dev, u32 data)
6954 struct bnx2 *bp = netdev_priv(dev);
6961 save = REG_RD(bp, BNX2_MISC_CFG);
6962 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6964 for (i = 0; i < (data * 2); i++) {
6966 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6969 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6970 BNX2_EMAC_LED_1000MB_OVERRIDE |
6971 BNX2_EMAC_LED_100MB_OVERRIDE |
6972 BNX2_EMAC_LED_10MB_OVERRIDE |
6973 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6974 BNX2_EMAC_LED_TRAFFIC);
6976 msleep_interruptible(500);
6977 if (signal_pending(current))
6980 REG_WR(bp, BNX2_EMAC_LED, 0);
6981 REG_WR(bp, BNX2_MISC_CFG, save);
6986 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6988 struct bnx2 *bp = netdev_priv(dev);
6990 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6991 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6993 return (ethtool_op_set_tx_csum(dev, data));
6996 static const struct ethtool_ops bnx2_ethtool_ops = {
6997 .get_settings = bnx2_get_settings,
6998 .set_settings = bnx2_set_settings,
6999 .get_drvinfo = bnx2_get_drvinfo,
7000 .get_regs_len = bnx2_get_regs_len,
7001 .get_regs = bnx2_get_regs,
7002 .get_wol = bnx2_get_wol,
7003 .set_wol = bnx2_set_wol,
7004 .nway_reset = bnx2_nway_reset,
7005 .get_link = ethtool_op_get_link,
7006 .get_eeprom_len = bnx2_get_eeprom_len,
7007 .get_eeprom = bnx2_get_eeprom,
7008 .set_eeprom = bnx2_set_eeprom,
7009 .get_coalesce = bnx2_get_coalesce,
7010 .set_coalesce = bnx2_set_coalesce,
7011 .get_ringparam = bnx2_get_ringparam,
7012 .set_ringparam = bnx2_set_ringparam,
7013 .get_pauseparam = bnx2_get_pauseparam,
7014 .set_pauseparam = bnx2_set_pauseparam,
7015 .get_rx_csum = bnx2_get_rx_csum,
7016 .set_rx_csum = bnx2_set_rx_csum,
7017 .set_tx_csum = bnx2_set_tx_csum,
7018 .set_sg = ethtool_op_set_sg,
7019 .set_tso = bnx2_set_tso,
7020 .self_test = bnx2_self_test,
7021 .get_strings = bnx2_get_strings,
7022 .phys_id = bnx2_phys_id,
7023 .get_ethtool_stats = bnx2_get_ethtool_stats,
7024 .get_sset_count = bnx2_get_sset_count,
7027 /* Called with rtnl_lock */
7029 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7031 struct mii_ioctl_data *data = if_mii(ifr);
7032 struct bnx2 *bp = netdev_priv(dev);
7037 data->phy_id = bp->phy_addr;
7043 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7046 if (!netif_running(dev))
7049 spin_lock_bh(&bp->phy_lock);
7050 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7051 spin_unlock_bh(&bp->phy_lock);
7053 data->val_out = mii_regval;
7059 if (!capable(CAP_NET_ADMIN))
7062 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7065 if (!netif_running(dev))
7068 spin_lock_bh(&bp->phy_lock);
7069 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7070 spin_unlock_bh(&bp->phy_lock);
7081 /* Called with rtnl_lock */
7083 bnx2_change_mac_addr(struct net_device *dev, void *p)
7085 struct sockaddr *addr = p;
7086 struct bnx2 *bp = netdev_priv(dev);
7088 if (!is_valid_ether_addr(addr->sa_data))
7091 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7092 if (netif_running(dev))
7093 bnx2_set_mac_addr(bp);
7098 /* Called with rtnl_lock */
7100 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7102 struct bnx2 *bp = netdev_priv(dev);
7104 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7105 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7109 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7112 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7114 poll_bnx2(struct net_device *dev)
7116 struct bnx2 *bp = netdev_priv(dev);
7118 disable_irq(bp->pdev->irq);
7119 bnx2_interrupt(bp->pdev->irq, dev);
7120 enable_irq(bp->pdev->irq);
7124 static void __devinit
7125 bnx2_get_5709_media(struct bnx2 *bp)
7127 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7128 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7131 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7133 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7134 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7138 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7139 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7141 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7143 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7148 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7156 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7162 static void __devinit
7163 bnx2_get_pci_speed(struct bnx2 *bp)
7167 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7168 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7171 bp->flags |= BNX2_FLAG_PCIX;
7173 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7175 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7177 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7178 bp->bus_speed_mhz = 133;
7181 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7182 bp->bus_speed_mhz = 100;
7185 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7186 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7187 bp->bus_speed_mhz = 66;
7190 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7191 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7192 bp->bus_speed_mhz = 50;
7195 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7196 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7197 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7198 bp->bus_speed_mhz = 33;
7203 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7204 bp->bus_speed_mhz = 66;
7206 bp->bus_speed_mhz = 33;
7209 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7210 bp->flags |= BNX2_FLAG_PCI_32BIT;
7214 static int __devinit
7215 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7218 unsigned long mem_len;
7221 u64 dma_mask, persist_dma_mask;
7223 SET_NETDEV_DEV(dev, &pdev->dev);
7224 bp = netdev_priv(dev);
7229 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7230 rc = pci_enable_device(pdev);
7232 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7236 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7238 "Cannot find PCI device base address, aborting.\n");
7240 goto err_out_disable;
7243 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7245 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7246 goto err_out_disable;
7249 pci_set_master(pdev);
7250 pci_save_state(pdev);
7252 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7253 if (bp->pm_cap == 0) {
7255 "Cannot find power management capability, aborting.\n");
7257 goto err_out_release;
7263 spin_lock_init(&bp->phy_lock);
7264 spin_lock_init(&bp->indirect_lock);
7265 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7267 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7268 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7269 dev->mem_end = dev->mem_start + mem_len;
7270 dev->irq = pdev->irq;
7272 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7275 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7277 goto err_out_release;
7280 /* Configure byte swap and enable write to the reg_window registers.
7281 * Rely on CPU to do target byte swapping on big endian systems
7282 * The chip's target access swapping will not swap all accesses
7284 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7285 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7286 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7288 bnx2_set_power_state(bp, PCI_D0);
7290 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7292 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7293 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7295 "Cannot find PCIE capability, aborting.\n");
7299 bp->flags |= BNX2_FLAG_PCIE;
7300 if (CHIP_REV(bp) == CHIP_REV_Ax)
7301 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7303 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7304 if (bp->pcix_cap == 0) {
7306 "Cannot find PCIX capability, aborting.\n");
7312 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7313 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7314 bp->flags |= BNX2_FLAG_MSIX_CAP;
7317 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7318 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7319 bp->flags |= BNX2_FLAG_MSI_CAP;
7322 /* 5708 cannot support DMA addresses > 40-bit. */
7323 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7324 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7326 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7328 /* Configure DMA attributes. */
7329 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7330 dev->features |= NETIF_F_HIGHDMA;
7331 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7334 "pci_set_consistent_dma_mask failed, aborting.\n");
7337 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7338 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7342 if (!(bp->flags & BNX2_FLAG_PCIE))
7343 bnx2_get_pci_speed(bp);
7345 /* 5706A0 may falsely detect SERR and PERR. */
7346 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7347 reg = REG_RD(bp, PCI_COMMAND);
7348 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7349 REG_WR(bp, PCI_COMMAND, reg);
7351 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7352 !(bp->flags & BNX2_FLAG_PCIX)) {
7355 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7359 bnx2_init_nvram(bp);
7361 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7363 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7364 BNX2_SHM_HDR_SIGNATURE_SIG) {
7365 u32 off = PCI_FUNC(pdev->devfn) << 2;
7367 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7369 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7371 /* Get the permanent MAC address. First we need to make sure the
7372 * firmware is actually running.
7374 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7376 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7377 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7378 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7383 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7384 for (i = 0, j = 0; i < 3; i++) {
7387 num = (u8) (reg >> (24 - (i * 8)));
7388 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7389 if (num >= k || !skip0 || k == 1) {
7390 bp->fw_version[j++] = (num / k) + '0';
7395 bp->fw_version[j++] = '.';
7397 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7398 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7401 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7402 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7404 for (i = 0; i < 30; i++) {
7405 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7406 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7411 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7412 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7413 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7414 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7416 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7418 bp->fw_version[j++] = ' ';
7419 for (i = 0; i < 3; i++) {
7420 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7422 memcpy(&bp->fw_version[j], ®, 4);
7427 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7428 bp->mac_addr[0] = (u8) (reg >> 8);
7429 bp->mac_addr[1] = (u8) reg;
7431 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7432 bp->mac_addr[2] = (u8) (reg >> 24);
7433 bp->mac_addr[3] = (u8) (reg >> 16);
7434 bp->mac_addr[4] = (u8) (reg >> 8);
7435 bp->mac_addr[5] = (u8) reg;
7437 bp->tx_ring_size = MAX_TX_DESC_CNT;
7438 bnx2_set_rx_ring_size(bp, 255);
7442 bp->tx_quick_cons_trip_int = 20;
7443 bp->tx_quick_cons_trip = 20;
7444 bp->tx_ticks_int = 80;
7447 bp->rx_quick_cons_trip_int = 6;
7448 bp->rx_quick_cons_trip = 6;
7449 bp->rx_ticks_int = 18;
7452 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7454 bp->timer_interval = HZ;
7455 bp->current_interval = HZ;
7459 /* Disable WOL support if we are running on a SERDES chip. */
7460 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7461 bnx2_get_5709_media(bp);
7462 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7463 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7465 bp->phy_port = PORT_TP;
7466 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7467 bp->phy_port = PORT_FIBRE;
7468 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7469 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7470 bp->flags |= BNX2_FLAG_NO_WOL;
7473 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7474 /* Don't do parallel detect on this board because of
7475 * some board problems. The link will not go down
7476 * if we do parallel detect.
7478 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7479 pdev->subsystem_device == 0x310c)
7480 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7483 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7484 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7486 bnx2_init_remote_phy(bp);
7488 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7489 CHIP_NUM(bp) == CHIP_NUM_5708)
7490 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7491 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7492 (CHIP_REV(bp) == CHIP_REV_Ax ||
7493 CHIP_REV(bp) == CHIP_REV_Bx))
7494 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7496 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7497 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7498 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7499 bp->flags |= BNX2_FLAG_NO_WOL;
7503 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7504 bp->tx_quick_cons_trip_int =
7505 bp->tx_quick_cons_trip;
7506 bp->tx_ticks_int = bp->tx_ticks;
7507 bp->rx_quick_cons_trip_int =
7508 bp->rx_quick_cons_trip;
7509 bp->rx_ticks_int = bp->rx_ticks;
7510 bp->comp_prod_trip_int = bp->comp_prod_trip;
7511 bp->com_ticks_int = bp->com_ticks;
7512 bp->cmd_ticks_int = bp->cmd_ticks;
7515 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7517 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7518 * with byte enables disabled on the unused 32-bit word. This is legal
7519 * but causes problems on the AMD 8132 which will eventually stop
7520 * responding after a while.
7522 * AMD believes this incompatibility is unique to the 5706, and
7523 * prefers to locally disable MSI rather than globally disabling it.
7525 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7526 struct pci_dev *amd_8132 = NULL;
7528 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7529 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7532 if (amd_8132->revision >= 0x10 &&
7533 amd_8132->revision <= 0x13) {
7535 pci_dev_put(amd_8132);
7541 bnx2_set_default_link(bp);
7542 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7544 init_timer(&bp->timer);
7545 bp->timer.expires = RUN_AT(bp->timer_interval);
7546 bp->timer.data = (unsigned long) bp;
7547 bp->timer.function = bnx2_timer;
7553 iounmap(bp->regview);
7558 pci_release_regions(pdev);
7561 pci_disable_device(pdev);
7562 pci_set_drvdata(pdev, NULL);
7568 static char * __devinit
7569 bnx2_bus_string(struct bnx2 *bp, char *str)
7573 if (bp->flags & BNX2_FLAG_PCIE) {
7574 s += sprintf(s, "PCI Express");
7576 s += sprintf(s, "PCI");
7577 if (bp->flags & BNX2_FLAG_PCIX)
7578 s += sprintf(s, "-X");
7579 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7580 s += sprintf(s, " 32-bit");
7582 s += sprintf(s, " 64-bit");
7583 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7588 static void __devinit
7589 bnx2_init_napi(struct bnx2 *bp)
7593 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7594 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7595 int (*poll)(struct napi_struct *, int);
7600 poll = bnx2_poll_msix;
7602 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7607 static int __devinit
7608 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7610 static int version_printed = 0;
7611 struct net_device *dev = NULL;
7615 DECLARE_MAC_BUF(mac);
7617 if (version_printed++ == 0)
7618 printk(KERN_INFO "%s", version);
7620 /* dev zeroed in init_etherdev */
7621 dev = alloc_etherdev(sizeof(*bp));
7626 rc = bnx2_init_board(pdev, dev);
7632 dev->open = bnx2_open;
7633 dev->hard_start_xmit = bnx2_start_xmit;
7634 dev->stop = bnx2_close;
7635 dev->get_stats = bnx2_get_stats;
7636 dev->set_multicast_list = bnx2_set_rx_mode;
7637 dev->do_ioctl = bnx2_ioctl;
7638 dev->set_mac_address = bnx2_change_mac_addr;
7639 dev->change_mtu = bnx2_change_mtu;
7640 dev->tx_timeout = bnx2_tx_timeout;
7641 dev->watchdog_timeo = TX_TIMEOUT;
7643 dev->vlan_rx_register = bnx2_vlan_rx_register;
7645 dev->ethtool_ops = &bnx2_ethtool_ops;
7647 bp = netdev_priv(dev);
7650 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7651 dev->poll_controller = poll_bnx2;
7654 pci_set_drvdata(pdev, dev);
7656 memcpy(dev->dev_addr, bp->mac_addr, 6);
7657 memcpy(dev->perm_addr, bp->mac_addr, 6);
7658 bp->name = board_info[ent->driver_data].name;
7660 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7661 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7662 dev->features |= NETIF_F_IPV6_CSUM;
7665 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7667 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7668 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7669 dev->features |= NETIF_F_TSO6;
7671 if ((rc = register_netdev(dev))) {
7672 dev_err(&pdev->dev, "Cannot register net device\n");
7674 iounmap(bp->regview);
7675 pci_release_regions(pdev);
7676 pci_disable_device(pdev);
7677 pci_set_drvdata(pdev, NULL);
7682 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7683 "IRQ %d, node addr %s\n",
7686 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7687 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7688 bnx2_bus_string(bp, str),
7690 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7695 static void __devexit
7696 bnx2_remove_one(struct pci_dev *pdev)
7698 struct net_device *dev = pci_get_drvdata(pdev);
7699 struct bnx2 *bp = netdev_priv(dev);
7701 flush_scheduled_work();
7703 unregister_netdev(dev);
7706 iounmap(bp->regview);
7709 pci_release_regions(pdev);
7710 pci_disable_device(pdev);
7711 pci_set_drvdata(pdev, NULL);
7715 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7717 struct net_device *dev = pci_get_drvdata(pdev);
7718 struct bnx2 *bp = netdev_priv(dev);
7721 /* PCI register 4 needs to be saved whether netif_running() or not.
7722 * MSI address and data need to be saved if using MSI and
7725 pci_save_state(pdev);
7726 if (!netif_running(dev))
7729 flush_scheduled_work();
7730 bnx2_netif_stop(bp);
7731 netif_device_detach(dev);
7732 del_timer_sync(&bp->timer);
7733 if (bp->flags & BNX2_FLAG_NO_WOL)
7734 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7736 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7738 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7739 bnx2_reset_chip(bp, reset_code);
7741 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7746 bnx2_resume(struct pci_dev *pdev)
7748 struct net_device *dev = pci_get_drvdata(pdev);
7749 struct bnx2 *bp = netdev_priv(dev);
7751 pci_restore_state(pdev);
7752 if (!netif_running(dev))
7755 bnx2_set_power_state(bp, PCI_D0);
7756 netif_device_attach(dev);
7757 bnx2_init_nic(bp, 1);
7758 bnx2_netif_start(bp);
7763 * bnx2_io_error_detected - called when PCI error is detected
7764 * @pdev: Pointer to PCI device
7765 * @state: The current pci connection state
7767 * This function is called after a PCI bus error affecting
7768 * this device has been detected.
7770 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7771 pci_channel_state_t state)
7773 struct net_device *dev = pci_get_drvdata(pdev);
7774 struct bnx2 *bp = netdev_priv(dev);
7777 netif_device_detach(dev);
7779 if (netif_running(dev)) {
7780 bnx2_netif_stop(bp);
7781 del_timer_sync(&bp->timer);
7782 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7785 pci_disable_device(pdev);
7788 /* Request a slot slot reset. */
7789 return PCI_ERS_RESULT_NEED_RESET;
7793 * bnx2_io_slot_reset - called after the pci bus has been reset.
7794 * @pdev: Pointer to PCI device
7796 * Restart the card from scratch, as if from a cold-boot.
7798 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7800 struct net_device *dev = pci_get_drvdata(pdev);
7801 struct bnx2 *bp = netdev_priv(dev);
7804 if (pci_enable_device(pdev)) {
7806 "Cannot re-enable PCI device after reset.\n");
7808 return PCI_ERS_RESULT_DISCONNECT;
7810 pci_set_master(pdev);
7811 pci_restore_state(pdev);
7813 if (netif_running(dev)) {
7814 bnx2_set_power_state(bp, PCI_D0);
7815 bnx2_init_nic(bp, 1);
7819 return PCI_ERS_RESULT_RECOVERED;
7823 * bnx2_io_resume - called when traffic can start flowing again.
7824 * @pdev: Pointer to PCI device
7826 * This callback is called when the error recovery driver tells us that
7827 * its OK to resume normal operation.
7829 static void bnx2_io_resume(struct pci_dev *pdev)
7831 struct net_device *dev = pci_get_drvdata(pdev);
7832 struct bnx2 *bp = netdev_priv(dev);
7835 if (netif_running(dev))
7836 bnx2_netif_start(bp);
7838 netif_device_attach(dev);
7842 static struct pci_error_handlers bnx2_err_handler = {
7843 .error_detected = bnx2_io_error_detected,
7844 .slot_reset = bnx2_io_slot_reset,
7845 .resume = bnx2_io_resume,
7848 static struct pci_driver bnx2_pci_driver = {
7849 .name = DRV_MODULE_NAME,
7850 .id_table = bnx2_pci_tbl,
7851 .probe = bnx2_init_one,
7852 .remove = __devexit_p(bnx2_remove_one),
7853 .suspend = bnx2_suspend,
7854 .resume = bnx2_resume,
7855 .err_handler = &bnx2_err_handler,
7858 static int __init bnx2_init(void)
7860 return pci_register_driver(&bnx2_pci_driver);
7863 static void __exit bnx2_cleanup(void)
7865 pci_unregister_driver(&bnx2_pci_driver);
7868 module_init(bnx2_init);
7869 module_exit(bnx2_cleanup);