c37acc1d10acccb1ecebfafca0c9af7d07e2c37f
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54
55 #define DRV_MODULE_NAME         "bnx2"
56 #define PFX DRV_MODULE_NAME     ": "
57 #define DRV_MODULE_VERSION      "2.0.1"
58 #define DRV_MODULE_RELDATE      "May 6, 2009"
59 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-4.6.16.fw"
60 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-4.6.16.fw"
61 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-4.6.17.fw"
62 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-4.6.15.fw"
63
64 #define RUN_AT(x) (jiffies + (x))
65
66 /* Time in jiffies before concluding the transmitter is hung. */
67 #define TX_TIMEOUT  (5*HZ)
68
69 static char version[] __devinitdata =
70         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71
72 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
73 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
74 MODULE_LICENSE("GPL");
75 MODULE_VERSION(DRV_MODULE_VERSION);
76 MODULE_FIRMWARE(FW_MIPS_FILE_06);
77 MODULE_FIRMWARE(FW_RV2P_FILE_06);
78 MODULE_FIRMWARE(FW_MIPS_FILE_09);
79 MODULE_FIRMWARE(FW_RV2P_FILE_09);
80
81 static int disable_msi = 0;
82
83 module_param(disable_msi, int, 0);
84 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
85
86 typedef enum {
87         BCM5706 = 0,
88         NC370T,
89         NC370I,
90         BCM5706S,
91         NC370F,
92         BCM5708,
93         BCM5708S,
94         BCM5709,
95         BCM5709S,
96         BCM5716,
97         BCM5716S,
98 } board_t;
99
100 /* indexed by board_t, above */
101 static struct {
102         char *name;
103 } board_info[] __devinitdata = {
104         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
105         { "HP NC370T Multifunction Gigabit Server Adapter" },
106         { "HP NC370i Multifunction Gigabit Server Adapter" },
107         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
108         { "HP NC370F Multifunction Gigabit Server Adapter" },
109         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
110         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
111         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
112         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
113         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
114         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
115         };
116
117 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
119           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
121           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
125           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
127           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
131           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
132         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
133           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
134         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
135           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
136         { PCI_VENDOR_ID_BROADCOM, 0x163b,
137           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
138         { PCI_VENDOR_ID_BROADCOM, 0x163c,
139           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
140         { 0, }
141 };
142
143 static struct flash_spec flash_table[] =
144 {
145 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
146 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
147         /* Slow EEPROM */
148         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
149          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
150          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
151          "EEPROM - slow"},
152         /* Expansion entry 0001 */
153         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
154          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
155          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156          "Entry 0001"},
157         /* Saifun SA25F010 (non-buffered flash) */
158         /* strap, cfg1, & write1 need updates */
159         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
160          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
162          "Non-buffered flash (128kB)"},
163         /* Saifun SA25F020 (non-buffered flash) */
164         /* strap, cfg1, & write1 need updates */
165         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
166          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
168          "Non-buffered flash (256kB)"},
169         /* Expansion entry 0100 */
170         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
171          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
173          "Entry 0100"},
174         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
175         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
176          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
177          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
178          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
179         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
180         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
181          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
183          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
184         /* Saifun SA25F005 (non-buffered flash) */
185         /* strap, cfg1, & write1 need updates */
186         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
187          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
189          "Non-buffered flash (64kB)"},
190         /* Fast EEPROM */
191         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
192          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
193          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
194          "EEPROM - fast"},
195         /* Expansion entry 1001 */
196         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
197          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199          "Entry 1001"},
200         /* Expansion entry 1010 */
201         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
202          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204          "Entry 1010"},
205         /* ATMEL AT45DB011B (buffered flash) */
206         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
207          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
208          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
209          "Buffered flash (128kB)"},
210         /* Expansion entry 1100 */
211         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
212          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
213          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
214          "Entry 1100"},
215         /* Expansion entry 1101 */
216         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
217          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219          "Entry 1101"},
220         /* Ateml Expansion entry 1110 */
221         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
222          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
223          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
224          "Entry 1110 (Atmel)"},
225         /* ATMEL AT45DB021B (buffered flash) */
226         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
227          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
229          "Buffered flash (256kB)"},
230 };
231
232 static struct flash_spec flash_5709 = {
233         .flags          = BNX2_NV_BUFFERED,
234         .page_bits      = BCM5709_FLASH_PAGE_BITS,
235         .page_size      = BCM5709_FLASH_PAGE_SIZE,
236         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
237         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
238         .name           = "5709 Buffered flash (256kB)",
239 };
240
241 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
242
243 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
244 {
245         u32 diff;
246
247         smp_mb();
248
249         /* The ring uses 256 indices for 255 entries, one of them
250          * needs to be skipped.
251          */
252         diff = txr->tx_prod - txr->tx_cons;
253         if (unlikely(diff >= TX_DESC_CNT)) {
254                 diff &= 0xffff;
255                 if (diff == TX_DESC_CNT)
256                         diff = MAX_TX_DESC_CNT;
257         }
258         return (bp->tx_ring_size - diff);
259 }
260
/* Indirect register read: program the PCI config window address with
 * the target offset, then read the data back through the window.  The
 * indirect_lock serializes the two-step sequence against concurrent
 * indirect reads/writes.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
272
/* Indirect register write: counterpart to bnx2_reg_rd_ind().  Programs
 * the window address, then writes the value through the window, all
 * under indirect_lock so the address/data pair stays atomic.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
281
/* Write a word into the firmware shared memory region (offset is
 * relative to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
287
288 static u32
289 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
290 {
291         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
292 }
293
/* Write a value into the on-chip context memory at cid_addr + offset.
 * On 5709 the write goes through the CTX data/ctrl registers and we
 * poll (up to 5 x 5us) for the WRITE_REQ bit to clear; older chips use
 * a simple address/data register pair.  indirect_lock keeps the
 * multi-register sequence atomic.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the write request. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
317
/* Read PHY register 'reg' over the MDIO bus into *val.
 *
 * If the EMAC is auto-polling the PHY, polling is temporarily disabled
 * around the access (and re-enabled afterwards) so our manual MDIO
 * transaction does not collide with hardware polling.  The read is
 * started via BNX2_EMAC_MDIO_COMM and polled (up to 50 x 10us) for the
 * START_BUSY bit to clear.
 *
 * Returns 0 on success, -EBUSY if the transaction never completed
 * (*val is set to 0 in that case).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to post the write before delaying. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose the MDIO read command: PHY address, register, and the
	 * start/busy trigger.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
374
/* Write 'val' to PHY register 'reg' over the MDIO bus.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * MDIO transaction and restored afterwards.  The write is launched via
 * BNX2_EMAC_MDIO_COMM and polled (up to 50 x 10us) for START_BUSY to
 * clear.
 *
 * Returns 0 on success, -EBUSY if the transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to post the write before delaying. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose the MDIO write command: PHY address, register, data,
	 * and the start/busy trigger.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
423
/* Mask interrupts on every vector of the device.  The final read of
 * the INT_ACK_CMD register flushes the posted writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
437
/* Unmask interrupts on every vector.  For each vector the ack is
 * written twice: first with MASK_INT still set (acknowledging the
 * current status index while masked), then without it to actually
 * re-enable the vector.  Finally COAL_NOW forces the host coalescing
 * block to generate an immediate status block update.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
458
/* Mask all interrupts and wait for any in-flight handlers to finish.
 * intr_sem is bumped first so a racing handler sees interrupts as
 * disabled; it is decremented again in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
469
470 static void
471 bnx2_napi_disable(struct bnx2 *bp)
472 {
473         int i;
474
475         for (i = 0; i < bp->irq_nvecs; i++)
476                 napi_disable(&bp->bnx2_napi[i].napi);
477 }
478
479 static void
480 bnx2_napi_enable(struct bnx2 *bp)
481 {
482         int i;
483
484         for (i = 0; i < bp->irq_nvecs; i++)
485                 napi_enable(&bp->bnx2_napi[i].napi);
486 }
487
/* Stop all traffic processing: synchronously mask interrupts, then (if
 * the interface is up) quiesce NAPI and freeze the transmit queues.
 * Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
498
/* Resume traffic processing after bnx2_netif_stop().  Only the call
 * that drops intr_sem to zero actually restarts the queues, NAPI, and
 * interrupts, so nested stop/start pairs balance correctly.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
510
/* Free the TX descriptor rings (DMA-coherent) and software buffer
 * rings for every TX ring.  Safe to call on partially-allocated state:
 * each pointer is checked/NULLed, so it can serve as cleanup for a
 * failed bnx2_alloc_tx_mem().
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
530
531 static void
532 bnx2_free_rx_mem(struct bnx2 *bp)
533 {
534         int i;
535
536         for (i = 0; i < bp->num_rx_rings; i++) {
537                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
538                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
539                 int j;
540
541                 for (j = 0; j < bp->rx_max_ring; j++) {
542                         if (rxr->rx_desc_ring[j])
543                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
544                                                     rxr->rx_desc_ring[j],
545                                                     rxr->rx_desc_mapping[j]);
546                         rxr->rx_desc_ring[j] = NULL;
547                 }
548                 if (rxr->rx_buf_ring)
549                         vfree(rxr->rx_buf_ring);
550                 rxr->rx_buf_ring = NULL;
551
552                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
553                         if (rxr->rx_pg_desc_ring[j])
554                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
555                                                     rxr->rx_pg_desc_ring[j],
556                                                     rxr->rx_pg_desc_mapping[j]);
557                         rxr->rx_pg_desc_ring[j] = NULL;
558                 }
559                 if (rxr->rx_pg_ring)
560                         vfree(rxr->rx_pg_ring);
561                 rxr->rx_pg_ring = NULL;
562         }
563 }
564
/* Allocate, for every TX ring, the zeroed software buffer ring
 * (kzalloc) and the DMA-coherent descriptor ring.
 *
 * Returns 0 on success, -ENOMEM on failure.  On failure the caller
 * (bnx2_alloc_mem) is responsible for freeing whatever was allocated,
 * via bnx2_free_mem() -> bnx2_free_tx_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
586
/* Allocate, for every RX ring, the vmalloc'ed software buffer ring,
 * the DMA-coherent descriptor rings, and (if rx_pg_ring_size is
 * nonzero) the page ring and its descriptor rings.
 *
 * Returns 0 on success, -ENOMEM on failure.  On failure the caller
 * (bnx2_alloc_mem) cleans up via bnx2_free_mem() ->
 * bnx2_free_rx_mem(), which tolerates partial allocation.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		/* vmalloc does not zero; clear the software ring. */
		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
635
/* Release all device memory: TX/RX rings, 5709 context pages, and the
 * combined status + statistics block.  Tolerates partially-allocated
 * state, so it doubles as the error path of bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* The statistics block lives in the same allocation as the
	 * status block (see bnx2_alloc_mem), so one free covers both.
	 */
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
661
/* Allocate all device memory: the combined status + statistics block,
 * per-vector MSI-X status blocks, 5709 context pages, and the RX/TX
 * rings.
 *
 * Returns 0 on success, -ENOMEM on failure; on failure everything
 * already allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get per-vector MSI-X status blocks
		 * carved out of the same allocation at aligned offsets.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs host-resident context memory (8KB total). */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
738
/* Publish the current link state (speed/duplex, autoneg progress) to
 * the bootcode via the shared memory BNX2_LINK_STATUS word.  Skipped
 * entirely when the PHY is remotely managed (REMOTE_PHY_CAP).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice to get the
			 * current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
797
798 static char *
799 bnx2_xceiver_str(struct bnx2 *bp)
800 {
801         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
802                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
803                  "Copper"));
804 }
805
/* Log the link state change, update the carrier flag, and forward the
 * new state to the bootcode via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* The message is assembled from several printk calls;
		 * only the first carries the KERN_INFO level.
		 */
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
842
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	/* Resolve bp->flow_ctrl from forced settings or from the
	 * advertised pause capabilities of both link partners.
	 */
	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Not fully autonegotiated: use the requested setting.
		 * Pause frames only apply on full duplex links.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state directly
	 * in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Translate 1000X pause bits into the copper bit layout so
		 * the shared resolution logic below works for both.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
918
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP_TOP_AN_STATUS1 lives in the GP_STATUS register block:
	 * select it, read the status, then restore the default block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced mode: report the requested speed/duplex as-is. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	/* Autoneg: decode the negotiated speed and duplex from the
	 * GP status word.
	 */
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
957
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	/* Decode link speed and duplex from the 5708 SerDes 1000X
	 * status register.
	 */
	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
986
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	/* The 5706 SerDes link is always reported as 1 Gbps here; only
	 * the duplex needs to be resolved.
	 */
	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: the BMCR duplex bit is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	/* Autoneg: resolve duplex from the 1000X abilities common to
	 * both partners.
	 */
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1023
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	/* Resolve speed/duplex on a copper PHY: from the negotiated
	 * abilities if autoneg is on, otherwise from the forced BMCR bits.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Try gigabit first. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Link-partner 1000BASE-T bits in STAT1000 sit two bit
		 * positions above our CTRL1000 advertisement bits.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* Fall back to 10/100 resolution via ADV/LPA,
			 * picking the highest common ability.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: BMCR speed/duplex bits are authoritative. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1089
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	/* Build the L2 context-type word for this RX context. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): fixed field at bits 8-15; exact meaning not visible here */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: program flow-control watermarks scaled from the
		 * RX ring size.
		 */
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; clamp it, and drop lo_water
		 * entirely when hi_water scales down to zero.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1125
1126 static void
1127 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1128 {
1129         int i;
1130         u32 cid;
1131
1132         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1133                 if (i == 1)
1134                         cid = RX_RSS_CID;
1135                 bnx2_init_rx_context(bp, cid);
1136         }
1137 }
1138
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the MAC to match the resolved link parameters
	 * (speed, duplex, flow control).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* Use an alternate TX lengths value for half-duplex gigabit. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: default to GMII port mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX contexts carry flow-control watermarks that depend on
	 * bp->flow_ctrl, so reprogram them on every link change.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1206
1207 static void
1208 bnx2_enable_bmsr1(struct bnx2 *bp)
1209 {
1210         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1211             (CHIP_NUM(bp) == CHIP_NUM_5709))
1212                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1213                                MII_BNX2_BLK_ADDR_GP_STATUS);
1214 }
1215
1216 static void
1217 bnx2_disable_bmsr1(struct bnx2 *bp)
1218 {
1219         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1220             (CHIP_NUM(bp) == CHIP_NUM_5709))
1221                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1222                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1223 }
1224
1225 static int
1226 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1227 {
1228         u32 up1;
1229         int ret = 1;
1230
1231         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1232                 return 0;
1233
1234         if (bp->autoneg & AUTONEG_SPEED)
1235                 bp->advertising |= ADVERTISED_2500baseX_Full;
1236
1237         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1238                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1239
1240         bnx2_read_phy(bp, bp->mii_up1, &up1);
1241         if (!(up1 & BCM5708S_UP1_2G5)) {
1242                 up1 |= BCM5708S_UP1_2G5;
1243                 bnx2_write_phy(bp, bp->mii_up1, up1);
1244                 ret = 0;
1245         }
1246
1247         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1248                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1249                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1250
1251         return ret;
1252 }
1253
1254 static int
1255 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1256 {
1257         u32 up1;
1258         int ret = 0;
1259
1260         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1261                 return 0;
1262
1263         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1264                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1265
1266         bnx2_read_phy(bp, bp->mii_up1, &up1);
1267         if (up1 & BCM5708S_UP1_2G5) {
1268                 up1 &= ~BCM5708S_UP1_2G5;
1269                 bnx2_write_phy(bp, bp->mii_up1, up1);
1270                 ret = 1;
1271         }
1272
1273         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1274                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1275                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1276
1277         return ret;
1278 }
1279
1280 static void
1281 bnx2_enable_forced_2g5(struct bnx2 *bp)
1282 {
1283         u32 bmcr;
1284
1285         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1286                 return;
1287
1288         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1289                 u32 val;
1290
1291                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1293                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1294                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1295                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1296                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1297
1298                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1299                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1300                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1301
1302         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1303                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1304                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1305         }
1306
1307         if (bp->autoneg & AUTONEG_SPEED) {
1308                 bmcr &= ~BMCR_ANENABLE;
1309                 if (bp->req_duplex == DUPLEX_FULL)
1310                         bmcr |= BMCR_FULLDPLX;
1311         }
1312         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1313 }
1314
1315 static void
1316 bnx2_disable_forced_2g5(struct bnx2 *bp)
1317 {
1318         u32 bmcr;
1319
1320         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1321                 return;
1322
1323         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1324                 u32 val;
1325
1326                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1327                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1328                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1329                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1330                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1331
1332                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1333                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1334                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1335
1336         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1337                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1338                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1339         }
1340
1341         if (bp->autoneg & AUTONEG_SPEED)
1342                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1343         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1344 }
1345
1346 static void
1347 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1348 {
1349         u32 val;
1350
1351         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1352         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1353         if (start)
1354                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1355         else
1356                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1357 }
1358
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY devices: the firmware manages the link. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read the status register twice: the first read returns the
	 * latched value, the second the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes: override BMSR link state using the EMAC
		 * status and the AN debug shadow register (also read
		 * twice).
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link went down during parallel detect: re-enable
		 * autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only report when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1442
1443 static int
1444 bnx2_reset_phy(struct bnx2 *bp)
1445 {
1446         int i;
1447         u32 reg;
1448
1449         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1450
1451 #define PHY_RESET_MAX_WAIT 100
1452         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1453                 udelay(10);
1454
1455                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1456                 if (!(reg & BMCR_RESET)) {
1457                         udelay(20);
1458                         break;
1459                 }
1460         }
1461         if (i == PHY_RESET_MAX_WAIT) {
1462                 return -EBUSY;
1463         }
1464         return 0;
1465 }
1466
1467 static u32
1468 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1469 {
1470         u32 adv = 0;
1471
1472         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1473                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1474
1475                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1476                         adv = ADVERTISE_1000XPAUSE;
1477                 }
1478                 else {
1479                         adv = ADVERTISE_PAUSE_CAP;
1480                 }
1481         }
1482         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1483                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1484                         adv = ADVERTISE_1000XPSE_ASYM;
1485                 }
1486                 else {
1487                         adv = ADVERTISE_PAUSE_ASYM;
1488                 }
1489         }
1490         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1491                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1492                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1493                 }
1494                 else {
1495                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1496                 }
1497         }
1498         return adv;
1499 }
1500
1501 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1502
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	/* Encode the requested link settings into a SET_LINK argument
	 * word and hand it to the firmware, which owns the remote PHY.
	 */
	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock while waiting for the firmware to process the
	 * command (bnx2_fw_sync may sleep).
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1561
1562 static int
1563 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1564 __releases(&bp->phy_lock)
1565 __acquires(&bp->phy_lock)
1566 {
1567         u32 adv, bmcr;
1568         u32 new_adv = 0;
1569
1570         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1571                 return (bnx2_setup_remote_phy(bp, port));
1572
1573         if (!(bp->autoneg & AUTONEG_SPEED)) {
1574                 u32 new_bmcr;
1575                 int force_link_down = 0;
1576
1577                 if (bp->req_line_speed == SPEED_2500) {
1578                         if (!bnx2_test_and_enable_2g5(bp))
1579                                 force_link_down = 1;
1580                 } else if (bp->req_line_speed == SPEED_1000) {
1581                         if (bnx2_test_and_disable_2g5(bp))
1582                                 force_link_down = 1;
1583                 }
1584                 bnx2_read_phy(bp, bp->mii_adv, &adv);
1585                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1586
1587                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1588                 new_bmcr = bmcr & ~BMCR_ANENABLE;
1589                 new_bmcr |= BMCR_SPEED1000;
1590
1591                 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1592                         if (bp->req_line_speed == SPEED_2500)
1593                                 bnx2_enable_forced_2g5(bp);
1594                         else if (bp->req_line_speed == SPEED_1000) {
1595                                 bnx2_disable_forced_2g5(bp);
1596                                 new_bmcr &= ~0x2000;
1597                         }
1598
1599                 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1600                         if (bp->req_line_speed == SPEED_2500)
1601                                 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1602                         else
1603                                 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1604                 }
1605
1606                 if (bp->req_duplex == DUPLEX_FULL) {
1607                         adv |= ADVERTISE_1000XFULL;
1608                         new_bmcr |= BMCR_FULLDPLX;
1609                 }
1610                 else {
1611                         adv |= ADVERTISE_1000XHALF;
1612                         new_bmcr &= ~BMCR_FULLDPLX;
1613                 }
1614                 if ((new_bmcr != bmcr) || (force_link_down)) {
1615                         /* Force a link down visible on the other side */
1616                         if (bp->link_up) {
1617                                 bnx2_write_phy(bp, bp->mii_adv, adv &
1618                                                ~(ADVERTISE_1000XFULL |
1619                                                  ADVERTISE_1000XHALF));
1620                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1621                                         BMCR_ANRESTART | BMCR_ANENABLE);
1622
1623                                 bp->link_up = 0;
1624                                 netif_carrier_off(bp->dev);
1625                                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1626                                 bnx2_report_link(bp);
1627                         }
1628                         bnx2_write_phy(bp, bp->mii_adv, adv);
1629                         bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1630                 } else {
1631                         bnx2_resolve_flow_ctrl(bp);
1632                         bnx2_set_mac_link(bp);
1633                 }
1634                 return 0;
1635         }
1636
1637         bnx2_test_and_enable_2g5(bp);
1638
1639         if (bp->advertising & ADVERTISED_1000baseT_Full)
1640                 new_adv |= ADVERTISE_1000XFULL;
1641
1642         new_adv |= bnx2_phy_get_pause_adv(bp);
1643
1644         bnx2_read_phy(bp, bp->mii_adv, &adv);
1645         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1646
1647         bp->serdes_an_pending = 0;
1648         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1649                 /* Force a link down visible on the other side */
1650                 if (bp->link_up) {
1651                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1652                         spin_unlock_bh(&bp->phy_lock);
1653                         msleep(20);
1654                         spin_lock_bh(&bp->phy_lock);
1655                 }
1656
1657                 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1658                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1659                         BMCR_ANENABLE);
1660                 /* Speed up link-up time when the link partner
1661                  * does not autonegotiate which is very common
1662                  * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
1664                  * to minimize link disruptions. Autoneg. involves
1665                  * exchanging base pages plus 3 next pages and
1666                  * normally completes in about 120 msec.
1667                  */
1668                 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1669                 bp->serdes_an_pending = 1;
1670                 mod_timer(&bp->timer, jiffies + bp->current_interval);
1671         } else {
1672                 bnx2_resolve_flow_ctrl(bp);
1673                 bnx2_set_mac_link(bp);
1674         }
1675
1676         return 0;
1677 }
1678
/* All link speeds a fibre (SerDes) PHY may advertise through ethtool.
 * NOTE: the expansion is a conditional expression and therefore must be
 * fully parenthesized; without the outer parentheses an expression such
 * as "ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg" would bind the OR
 * into the else-branch only, dropping the extra bit on 2.5G-capable
 * boards.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* All link speeds a copper PHY may advertise through ethtool. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for all 10/100 speeds plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register advertisement bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1693
1694 static void
1695 bnx2_set_default_remote_link(struct bnx2 *bp)
1696 {
1697         u32 link;
1698
1699         if (bp->phy_port == PORT_TP)
1700                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1701         else
1702                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1703
1704         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1705                 bp->req_line_speed = 0;
1706                 bp->autoneg |= AUTONEG_SPEED;
1707                 bp->advertising = ADVERTISED_Autoneg;
1708                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1709                         bp->advertising |= ADVERTISED_10baseT_Half;
1710                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1711                         bp->advertising |= ADVERTISED_10baseT_Full;
1712                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1713                         bp->advertising |= ADVERTISED_100baseT_Half;
1714                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1715                         bp->advertising |= ADVERTISED_100baseT_Full;
1716                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1717                         bp->advertising |= ADVERTISED_1000baseT_Full;
1718                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1719                         bp->advertising |= ADVERTISED_2500baseX_Full;
1720         } else {
1721                 bp->autoneg = 0;
1722                 bp->advertising = 0;
1723                 bp->req_duplex = DUPLEX_FULL;
1724                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1725                         bp->req_line_speed = SPEED_10;
1726                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1727                                 bp->req_duplex = DUPLEX_HALF;
1728                 }
1729                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1730                         bp->req_line_speed = SPEED_100;
1731                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1732                                 bp->req_duplex = DUPLEX_HALF;
1733                 }
1734                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1735                         bp->req_line_speed = SPEED_1000;
1736                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1737                         bp->req_line_speed = SPEED_2500;
1738         }
1739 }
1740
1741 static void
1742 bnx2_set_default_link(struct bnx2 *bp)
1743 {
1744         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1745                 bnx2_set_default_remote_link(bp);
1746                 return;
1747         }
1748
1749         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1750         bp->req_line_speed = 0;
1751         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1752                 u32 reg;
1753
1754                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1755
1756                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1757                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1758                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1759                         bp->autoneg = 0;
1760                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1761                         bp->req_duplex = DUPLEX_FULL;
1762                 }
1763         } else
1764                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1765 }
1766
1767 static void
1768 bnx2_send_heart_beat(struct bnx2 *bp)
1769 {
1770         u32 msg;
1771         u32 addr;
1772
1773         spin_lock(&bp->indirect_lock);
1774         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1775         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1776         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1777         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1778         spin_unlock(&bp->indirect_lock);
1779 }
1780
/* Process a link event from the firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * line speed, duplex, flow control and PHY port type, then updates the
 * MAC and reports any change in link state.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, to detect a transition */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Answer the firmware heartbeat if it reports an expired pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case sets bp->duplex and then falls
		 * through to share the speed assignment with the
		 * corresponding full-duplex case.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		/* With full speed+flow-control autoneg, take what the
		 * firmware negotiated; otherwise use the requested
		 * setting (full duplex only).
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* A port-type change invalidates the cached defaults. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1857
1858 static int
1859 bnx2_set_remote_link(struct bnx2 *bp)
1860 {
1861         u32 evt_code;
1862
1863         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1864         switch (evt_code) {
1865                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1866                         bnx2_remote_phy_event(bp);
1867                         break;
1868                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1869                 default:
1870                         bnx2_send_heart_beat(bp);
1871                         break;
1872         }
1873         return 0;
1874 }
1875
/* Program a copper PHY according to bp's requested link settings.
 * With AUTONEG_SPEED set, the advertisement registers are rewritten
 * and autoneg restarted only if something actually changed; otherwise
 * the requested speed/duplex is forced through BMCR.  Returns 0.
 * Temporarily drops bp->phy_lock around the forced link-down delay
 * (see the sparse annotations).
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisements, masked to the bits we manage. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisements from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when an advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice for the
		 * current value.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1974
1975 static int
1976 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1977 __releases(&bp->phy_lock)
1978 __acquires(&bp->phy_lock)
1979 {
1980         if (bp->loopback == MAC_LOOPBACK)
1981                 return 0;
1982
1983         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1984                 return (bnx2_setup_serdes_phy(bp, port));
1985         }
1986         else {
1987                 return (bnx2_setup_copper_phy(bp));
1988         }
1989 }
1990
/* Initialize the 5709 SerDes PHY.  The 5709 places the standard IEEE
 * MII registers at an offset (base + 0x10), so the bp->mii_* register
 * map is redirected first; the remaining blocks are selected through
 * MII_BNX2_BLK_ADDR before each group of accesses.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Redirect the IEEE register map to the shifted 5709 layout. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route MDIO accesses to the autoneg MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fixed fiber mode; disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg next-page message controls. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2040
/* Initialize the 5708 SerDes PHY: select IEEE-compliant signaling,
 * enable fiber mode with auto-detection, optionally advertise 2.5G,
 * raise the TX amplitude on early chip revisions, and apply the
 * TXCTL3 value from NVRAM config on backplane boards.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Select IEEE-compliant operation in the DIGITAL3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board is capable. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM TXCTL3 value, but only on backplane boards. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2098
2099 static int
2100 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2101 {
2102         if (reset_phy)
2103                 bnx2_reset_phy(bp);
2104
2105         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2106
2107         if (CHIP_NUM(bp) == CHIP_NUM_5706)
2108                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2109
2110         if (bp->dev->mtu > 1500) {
2111                 u32 val;
2112
2113                 /* Set extended packet length bit */
2114                 bnx2_write_phy(bp, 0x18, 0x7);
2115                 bnx2_read_phy(bp, 0x18, &val);
2116                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2117
2118                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2119                 bnx2_read_phy(bp, 0x1c, &val);
2120                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2121         }
2122         else {
2123                 u32 val;
2124
2125                 bnx2_write_phy(bp, 0x18, 0x7);
2126                 bnx2_read_phy(bp, 0x18, &val);
2127                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2128
2129                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2130                 bnx2_read_phy(bp, 0x1c, &val);
2131                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2132         }
2133
2134         return 0;
2135 }
2136
/* Initialize a copper PHY.  Applies the CRC workaround and early-DAC
 * disable sequences when the corresponding phy_flags are set, sets or
 * clears the extended packet length bits depending on MTU, and enables
 * ethernet@wirespeed.  Registers 0x10/0x15/0x17/0x18/0x1c are vendor
 * shadow/expansion registers.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround sequence -- magic values from the vendor. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2188
2189
2190 static int
2191 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2192 __releases(&bp->phy_lock)
2193 __acquires(&bp->phy_lock)
2194 {
2195         u32 val;
2196         int rc = 0;
2197
2198         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2199         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2200
2201         bp->mii_bmcr = MII_BMCR;
2202         bp->mii_bmsr = MII_BMSR;
2203         bp->mii_bmsr1 = MII_BMSR;
2204         bp->mii_adv = MII_ADVERTISE;
2205         bp->mii_lpa = MII_LPA;
2206
2207         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2208
2209         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2210                 goto setup_phy;
2211
2212         bnx2_read_phy(bp, MII_PHYSID1, &val);
2213         bp->phy_id = val << 16;
2214         bnx2_read_phy(bp, MII_PHYSID2, &val);
2215         bp->phy_id |= val & 0xffff;
2216
2217         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2218                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2219                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2220                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2221                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2222                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2223                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2224         }
2225         else {
2226                 rc = bnx2_init_copper_phy(bp, reset_phy);
2227         }
2228
2229 setup_phy:
2230         if (!rc)
2231                 rc = bnx2_setup_phy(bp, bp->phy_port);
2232
2233         return rc;
2234 }
2235
2236 static int
2237 bnx2_set_mac_loopback(struct bnx2 *bp)
2238 {
2239         u32 mac_mode;
2240
2241         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2242         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2243         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2244         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2245         bp->link_up = 1;
2246         return 0;
2247 }
2248
2249 static int bnx2_test_link(struct bnx2 *);
2250
2251 static int
2252 bnx2_set_phy_loopback(struct bnx2 *bp)
2253 {
2254         u32 mac_mode;
2255         int rc, i;
2256
2257         spin_lock_bh(&bp->phy_lock);
2258         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2259                             BMCR_SPEED1000);
2260         spin_unlock_bh(&bp->phy_lock);
2261         if (rc)
2262                 return rc;
2263
2264         for (i = 0; i < 10; i++) {
2265                 if (bnx2_test_link(bp) == 0)
2266                         break;
2267                 msleep(100);
2268         }
2269
2270         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2271         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2272                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2273                       BNX2_EMAC_MODE_25G_MODE);
2274
2275         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2276         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2277         bp->link_up = 1;
2278         return 0;
2279 }
2280
/* Send a message to the bootcode through the driver mailbox.
 * @msg_data: message code; the sequence bits are filled in here.
 * @ack: if set, poll (up to BNX2_FW_ACK_TIME_OUT_MS) for the firmware
 *	to echo the sequence number back in the firmware mailbox.
 * @silent: suppress the timeout printk.
 * Returns 0 on success (WAIT0 messages always succeed once sent),
 * -EBUSY on ack timeout (after informing the firmware), or -EIO when
 * the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed whether or not the ack arrived. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2326
/* Initialize the 5709 context memory: kick the hardware MEM_INIT,
 * then write the DMA address of each host context page into the host
 * page table and wait for every write request to be consumed.
 * Returns 0 on success, -EBUSY on a hardware timeout, or -ENOMEM if
 * a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and start memory init; the page
	 * size is encoded relative to 256 bytes in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Each context page must already be allocated. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Publish the 64-bit DMA address of page i, then kick
		 * the write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the write request bit clears. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2374
/* Zero the on-chip context memory for all 96 contexts (pre-5709
 * chips).  On 5706 A0, CIDs with bit 3 set map to a remapped physical
 * range; on later chips the virtual and physical addresses coincide.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* 5706 A0 remap: bit-3 CIDs live at 0x60-based
			 * physical IDs.
			 */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans CTX_SIZE / PHY_CTX_SIZE physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2417
/* Work around bad on-chip RX mbuf memory: drain the firmware mbuf
 * pool, remember the good buffers (bit 9 clear) in a scratch array,
 * and free only those back -- permanently removing the bad blocks
 * from circulation.  Returns 0 on success, -ENOMEM if the scratch
 * array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): the free word appears to encode the mbuf
		 * handle twice plus a low valid bit -- confirm against
		 * the RBUF register description.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2469
2470 static void
2471 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2472 {
2473         u32 val;
2474
2475         val = (mac_addr[0] << 8) | mac_addr[1];
2476
2477         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2478
2479         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2480                 (mac_addr[4] << 8) | mac_addr[5];
2481
2482         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2483 }
2484
2485 static inline int
2486 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2487 {
2488         dma_addr_t mapping;
2489         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2490         struct rx_bd *rxbd =
2491                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2492         struct page *page = alloc_page(GFP_ATOMIC);
2493
2494         if (!page)
2495                 return -ENOMEM;
2496         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2497                                PCI_DMA_FROMDEVICE);
2498         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2499                 __free_page(page);
2500                 return -EIO;
2501         }
2502
2503         rx_pg->page = page;
2504         pci_unmap_addr_set(rx_pg, mapping, mapping);
2505         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2506         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2507         return 0;
2508 }
2509
2510 static void
2511 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2512 {
2513         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2514         struct page *page = rx_pg->page;
2515
2516         if (!page)
2517                 return;
2518
2519         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2520                        PCI_DMA_FROMDEVICE);
2521
2522         __free_page(page);
2523         rx_pg->page = NULL;
2524 }
2525
2526 static inline int
2527 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2528 {
2529         struct sk_buff *skb;
2530         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2531         dma_addr_t mapping;
2532         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2533         unsigned long align;
2534
2535         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2536         if (skb == NULL) {
2537                 return -ENOMEM;
2538         }
2539
2540         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2541                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2542
2543         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2544                 PCI_DMA_FROMDEVICE);
2545         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2546                 dev_kfree_skb(skb);
2547                 return -EIO;
2548         }
2549
2550         rx_buf->skb = skb;
2551         pci_unmap_addr_set(rx_buf, mapping, mapping);
2552
2553         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2554         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2555
2556         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2557
2558         return 0;
2559 }
2560
2561 static int
2562 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2563 {
2564         struct status_block *sblk = bnapi->status_blk.msi;
2565         u32 new_link_state, old_link_state;
2566         int is_set = 1;
2567
2568         new_link_state = sblk->status_attn_bits & event;
2569         old_link_state = sblk->status_attn_bits_ack & event;
2570         if (new_link_state != old_link_state) {
2571                 if (new_link_state)
2572                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2573                 else
2574                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2575         } else
2576                 is_set = 0;
2577
2578         return is_set;
2579 }
2580
/* Service PHY attention events signalled in the status block, updating
 * local or remote-managed link state under phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2594
/* Read the hardware tx consumer index from the status block, skipping
 * the reserved last entry of a ring page so callers can compare it
 * directly against their software index.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	/* The last slot of each ring page is not used for descriptors;
	 * step past it.
	 */
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2608
/* Reclaim completed tx descriptors for this napi instance's tx ring.
 *
 * Walks the ring from the software consumer index up to the hardware
 * consumer index, unmapping and freeing each fully completed skb, then
 * wakes the tx queue if it was stopped and enough descriptors have
 * become available.  Returns the number of packets reclaimed (a
 * @budget of 0 is never matched, i.e. effectively unlimited).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* The tx queue index matches this bnapi's position in the array. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last BD (+1 for the
			 * reserved last ring entry when it is crossed).
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the last BD of this packet has not
			 * completed yet; retry on a later pass.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Advance past this packet's fragment BDs. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up completions that arrived while processing. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2691
/* Return @count rx page-ring entries to the producer side for reuse.
 *
 * If @skb is non-NULL, the caller failed to allocate a replacement for
 * the last page in the skb's frags array: that page is detached from
 * the skb and recycled along with the others, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page and its DMA mapping from the consumer
		 * slot to the producer slot when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2747
/* Recycle an rx buffer from the @cons slot to the @prod slot, giving
 * @skb (and its DMA mapping) back to the ring so the hardware can fill
 * it again — used when a replacement buffer could not be allocated or
 * the packet data was copied out.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area back to the device; only this range was
	 * synced for the CPU in bnx2_rx_int().
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb pointer update above is all that is needed. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2777
/* Finish receiving one packet whose head is in @skb.
 *
 * A replacement rx buffer is allocated for the ring slot first; on
 * failure the skb (and any page-ring entries for split frames) is
 * recycled back to the ring and an error returned.  Otherwise the
 * buffer is unmapped and, for split frames (@hdr_len != 0), the
 * remaining frame bytes are attached from the page ring as fragments.
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.
 *
 * Returns 0 on success or a negative errno; on failure the skb has
 * been recycled and must not be used by the caller.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* raw_len restores the 4-byte trailer stripped
			 * by the caller; recycle the page-ring entries
			 * this frame occupied.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Non-split frame: everything is in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes beyond the header live in the page ring; +4 for
		 * the trailer that follows the frame data.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain: trim any
				 * excess already attached and recycle
				 * the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Rewind the ring indices and recycle
				 * the skb plus remaining pages.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2876
/* Read the hardware rx consumer index from the status block, skipping
 * the reserved last entry of a ring page so callers can compare it
 * directly against their software index.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	/* The last slot of each ring page is not used for descriptors;
	 * step past it.
	 */
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2890
/* Process up to @budget received packets from this napi instance's rx
 * ring.  Small frames (<= rx_copy_thresh) are copied into a fresh skb
 * and the original buffer recycled; larger frames are handed to
 * bnx2_rx_skb(), which may attach additional data from the page ring.
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; the rest is
		 * covered by the full unmap in bnx2_rx_skb().
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware writes an l2_fhdr status structure at
		 * the start of the buffer, before the frame data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Recycle the buffers of errored frames without
		 * delivering them.
		 */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4 bytes after the frame data. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			/* The original buffer goes straight back to the
			 * ring.
			 */
			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No vlan group to accelerate into:
				 * re-insert the 802.1Q header into the
				 * packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (0x8100 is the 802.1Q ethertype).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum only if no
			 * checksum errors were flagged.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices and byte
	 * sequence.
	 */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3066
3067 /* MSI ISR - The only difference between this and the INTx ISR
3068  * is that the MSI interrupt is always serviced.
3069  */
/* MSI interrupt handler: masks further interrupts via the ack command
 * register and schedules NAPI (unless interrupts are administratively
 * disabled via intr_sem).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask interrupts while the NAPI poll runs. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3089
/* One-shot MSI interrupt handler.  Unlike bnx2_msi(), no mask/ack
 * register write is issued before scheduling NAPI (presumably the
 * hardware auto-masks in one-shot mode — see the setup code elsewhere
 * in this file to confirm).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3106
/* INTx interrupt handler (the line may be shared with other devices). */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask interrupts while the NAPI poll runs. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index being handled before scheduling so
	 * the poll loop acks the right value.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3145
3146 static inline int
3147 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3148 {
3149         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3150         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3151
3152         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3153             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3154                 return 1;
3155         return 0;
3156 }
3157
3158 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3159                                  STATUS_ATTN_BITS_TIMER_ABORT)
3160
3161 static inline int
3162 bnx2_has_work(struct bnx2_napi *bnapi)
3163 {
3164         struct status_block *sblk = bnapi->status_blk.msi;
3165
3166         if (bnx2_has_fast_work(bnapi))
3167                 return 1;
3168
3169         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3170             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3171                 return 1;
3172
3173         return 0;
3174 }
3175
/* Recover from a missed MSI: if work is pending and the status index
 * has not advanced since the previous invocation, pulse the MSI enable
 * bit off and back on and invoke the MSI handler directly to restart
 * processing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Work pending with no status index movement since the
		 * last check: the MSI was probably lost.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3197
/* Service link attention events signalled in the status block. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Attention bits differing from their acks indicate an
	 * unserviced link event.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3217
/* Run one round of tx completion and rx processing for @bnapi.
 *
 * TX completions do not count against the NAPI budget (bnx2_tx_int()
 * is called with a budget of 0, which its packet counter never
 * matches); rx work consumes whatever budget remains.  Returns the
 * updated work_done total.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3232
/* NAPI poll handler for an MSI-X vector: loop over ring work until the
 * budget is exhausted or no work remains, then complete NAPI and
 * re-enable this vector's interrupt.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack the last seen status index for this
			 * vector, re-enabling its interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3259
/* NAPI poll handler for the single-vector (MSI or INTx) case: handles
 * link events as well as ring work, then completes NAPI and re-enables
 * interrupts when no work remains or returns early once the budget is
 * exhausted.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack write is
				 * sufficient.
				 */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with interrupts still masked,
			 * then write again without the mask bit to
			 * re-enable them.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3304
3305 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3306  * from set_multicast.
3307  */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN-tag bits cleared; both are re-derived below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no VLAN group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash registers with 1s. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address with CRC32; the low 8 CRC bits pick
		 * a register (bits 7:5) and a bit within it (bits 4:0).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	uc_ptr = NULL;
	/* More unicast addresses than hw match slots: fall back to
	 * promiscuous instead of dropping some of them.
	 */
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries into the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort-user register: clear it, load the new mode,
	 * then set the enable bit last.
	 */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3402
3403 static int __devinit
3404 check_fw_section(const struct firmware *fw,
3405                  const struct bnx2_fw_file_section *section,
3406                  u32 alignment, bool non_empty)
3407 {
3408         u32 offset = be32_to_cpu(section->offset);
3409         u32 len = be32_to_cpu(section->len);
3410
3411         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3412                 return -EINVAL;
3413         if ((non_empty && len == 0) || len > fw->size - offset ||
3414             len & (alignment - 1))
3415                 return -EINVAL;
3416         return 0;
3417 }
3418
3419 static int __devinit
3420 check_mips_fw_entry(const struct firmware *fw,
3421                     const struct bnx2_mips_fw_file_entry *entry)
3422 {
3423         if (check_fw_section(fw, &entry->text, 4, true) ||
3424             check_fw_section(fw, &entry->data, 4, false) ||
3425             check_fw_section(fw, &entry->rodata, 4, false))
3426                 return -EINVAL;
3427         return 0;
3428 }
3429
3430 static int __devinit
3431 bnx2_request_firmware(struct bnx2 *bp)
3432 {
3433         const char *mips_fw_file, *rv2p_fw_file;
3434         const struct bnx2_mips_fw_file *mips_fw;
3435         const struct bnx2_rv2p_fw_file *rv2p_fw;
3436         int rc;
3437
3438         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3439                 mips_fw_file = FW_MIPS_FILE_09;
3440                 rv2p_fw_file = FW_RV2P_FILE_09;
3441         } else {
3442                 mips_fw_file = FW_MIPS_FILE_06;
3443                 rv2p_fw_file = FW_RV2P_FILE_06;
3444         }
3445
3446         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3447         if (rc) {
3448                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3449                        mips_fw_file);
3450                 return rc;
3451         }
3452
3453         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3454         if (rc) {
3455                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3456                        rv2p_fw_file);
3457                 return rc;
3458         }
3459         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3460         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3461         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3462             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3463             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3464             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3465             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3466             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3467                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3468                        mips_fw_file);
3469                 return -EINVAL;
3470         }
3471         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3472             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3473             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3474                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3475                        rv2p_fw_file);
3476                 return -EINVAL;
3477         }
3478
3479         return 0;
3480 }
3481
3482 static u32
3483 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3484 {
3485         switch (idx) {
3486         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3487                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3488                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3489                 break;
3490         }
3491         return rv2p_code;
3492 }
3493
/* Download one RV2P processor's firmware from bp->rv2p_firmware and
 * apply its fixup table, then reset the processor (it is un-stalled
 * later by the caller's init sequence).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for the target processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: stage the high and low words,
	 * then latch them at instruction index i/8 via the address/cmd
	 * register.  Order of the three writes matters.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups.  Each non-zero fixup word is an
	 * instruction location; rewrite that instruction in place with
	 * the word possibly patched by rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3553
3554 static int
3555 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3556             const struct bnx2_mips_fw_file_entry *fw_entry)
3557 {
3558         u32 addr, len, file_offset;
3559         __be32 *data;
3560         u32 offset;
3561         u32 val;
3562
3563         /* Halt the CPU. */
3564         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3565         val |= cpu_reg->mode_value_halt;
3566         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3567         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3568
3569         /* Load the Text area. */
3570         addr = be32_to_cpu(fw_entry->text.addr);
3571         len = be32_to_cpu(fw_entry->text.len);
3572         file_offset = be32_to_cpu(fw_entry->text.offset);
3573         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3574
3575         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3576         if (len) {
3577                 int j;
3578
3579                 for (j = 0; j < (len / 4); j++, offset += 4)
3580                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3581         }
3582
3583         /* Load the Data area. */
3584         addr = be32_to_cpu(fw_entry->data.addr);
3585         len = be32_to_cpu(fw_entry->data.len);
3586         file_offset = be32_to_cpu(fw_entry->data.offset);
3587         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3588
3589         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3590         if (len) {
3591                 int j;
3592
3593                 for (j = 0; j < (len / 4); j++, offset += 4)
3594                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3595         }
3596
3597         /* Load the Read-Only area. */
3598         addr = be32_to_cpu(fw_entry->rodata.addr);
3599         len = be32_to_cpu(fw_entry->rodata.len);
3600         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3601         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3602
3603         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3604         if (len) {
3605                 int j;
3606
3607                 for (j = 0; j < (len / 4); j++, offset += 4)
3608                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3609         }
3610
3611         /* Clear the pre-fetch instruction. */
3612         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3613
3614         val = be32_to_cpu(fw_entry->start_addr);
3615         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3616
3617         /* Start the CPU. */
3618         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3619         val &= ~cpu_reg->mode_value_halt;
3620         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3621         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3622
3623         return 0;
3624 }
3625
3626 static int
3627 bnx2_init_cpus(struct bnx2 *bp)
3628 {
3629         const struct bnx2_mips_fw_file *mips_fw =
3630                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3631         const struct bnx2_rv2p_fw_file *rv2p_fw =
3632                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3633         int rc;
3634
3635         /* Initialize the RV2P processor. */
3636         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3637         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3638
3639         /* Initialize the RX Processor. */
3640         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3641         if (rc)
3642                 goto init_cpu_err;
3643
3644         /* Initialize the TX Processor. */
3645         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3646         if (rc)
3647                 goto init_cpu_err;
3648
3649         /* Initialize the TX Patch-up Processor. */
3650         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3651         if (rc)
3652                 goto init_cpu_err;
3653
3654         /* Initialize the Completion Processor. */
3655         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3656         if (rc)
3657                 goto init_cpu_err;
3658
3659         /* Initialize the Command Processor. */
3660         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3661
3662 init_cpu_err:
3663         return rc;
3664 }
3665
/* Transition the device between PCI power states.  D0 restores normal
 * MAC operation; D3hot optionally arms Wake-on-LAN (copper links are
 * renegotiated down to 10/100 first) and notifies the firmware of the
 * suspend mode.  Only PCI_D0 and PCI_D3hot are accepted; anything else
 * returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and write the
		 * PME status bit.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packet and disable
		 * magic-packet detection for normal operation.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force copper links to autoneg at
			 * 10/100 for the low-power link, restoring the
			 * user's settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast; enable the
			 * sorter last, after loading the mode.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware which suspend mode to use. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Set the power-state field to D3hot (3).  NOTE(review):
		 * 5706 A0/A1 are only moved to D3hot when WoL is enabled,
		 * otherwise they stay in D0 — presumably a chip-rev
		 * workaround; confirm against the errata.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3803
3804 static int
3805 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3806 {
3807         u32 val;
3808         int j;
3809
3810         /* Request access to the flash interface. */
3811         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3812         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3813                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3814                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3815                         break;
3816
3817                 udelay(5);
3818         }
3819
3820         if (j >= NVRAM_TIMEOUT_COUNT)
3821                 return -EBUSY;
3822
3823         return 0;
3824 }
3825
3826 static int
3827 bnx2_release_nvram_lock(struct bnx2 *bp)
3828 {
3829         int j;
3830         u32 val;
3831
3832         /* Relinquish nvram interface. */
3833         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3834
3835         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3836                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3837                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3838                         break;
3839
3840                 udelay(5);
3841         }
3842
3843         if (j >= NVRAM_TIMEOUT_COUNT)
3844                 return -EBUSY;
3845
3846         return 0;
3847 }
3848
3849
3850 static int
3851 bnx2_enable_nvram_write(struct bnx2 *bp)
3852 {
3853         u32 val;
3854
3855         val = REG_RD(bp, BNX2_MISC_CFG);
3856         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3857
3858         if (bp->flash_info->flags & BNX2_NV_WREN) {
3859                 int j;
3860
3861                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3862                 REG_WR(bp, BNX2_NVM_COMMAND,
3863                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3864
3865                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3866                         udelay(5);
3867
3868                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3869                         if (val & BNX2_NVM_COMMAND_DONE)
3870                                 break;
3871                 }
3872
3873                 if (j >= NVRAM_TIMEOUT_COUNT)
3874                         return -EBUSY;
3875         }
3876         return 0;
3877 }
3878
3879 static void
3880 bnx2_disable_nvram_write(struct bnx2 *bp)
3881 {
3882         u32 val;
3883
3884         val = REG_RD(bp, BNX2_MISC_CFG);
3885         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3886 }
3887
3888
3889 static void
3890 bnx2_enable_nvram_access(struct bnx2 *bp)
3891 {
3892         u32 val;
3893
3894         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3895         /* Enable both bits, even on read. */
3896         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3897                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3898 }
3899
3900 static void
3901 bnx2_disable_nvram_access(struct bnx2 *bp)
3902 {
3903         u32 val;
3904
3905         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3906         /* Disable both bits, even after read. */
3907         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3908                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3909                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3910 }
3911
3912 static int
3913 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3914 {
3915         u32 cmd;
3916         int j;
3917
3918         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3919                 /* Buffered flash, no erase needed */
3920                 return 0;
3921
3922         /* Build an erase command */
3923         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3924               BNX2_NVM_COMMAND_DOIT;
3925
3926         /* Need to clear DONE bit separately. */
3927         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3928
3929         /* Address of the NVRAM to read from. */
3930         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3931
3932         /* Issue an erase command. */
3933         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3934
3935         /* Wait for completion. */
3936         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3937                 u32 val;
3938
3939                 udelay(5);
3940
3941                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3942                 if (val & BNX2_NVM_COMMAND_DONE)
3943                         break;
3944         }
3945
3946         if (j >= NVRAM_TIMEOUT_COUNT)
3947                 return -EBUSY;
3948
3949         return 0;
3950 }
3951
3952 static int
3953 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3954 {
3955         u32 cmd;
3956         int j;
3957
3958         /* Build the command word. */
3959         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3960
3961         /* Calculate an offset of a buffered flash, not needed for 5709. */
3962         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3963                 offset = ((offset / bp->flash_info->page_size) <<
3964                            bp->flash_info->page_bits) +
3965                           (offset % bp->flash_info->page_size);
3966         }
3967
3968         /* Need to clear DONE bit separately. */
3969         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3970
3971         /* Address of the NVRAM to read from. */
3972         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3973
3974         /* Issue a read command. */
3975         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3976
3977         /* Wait for completion. */
3978         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3979                 u32 val;
3980
3981                 udelay(5);
3982
3983                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3984                 if (val & BNX2_NVM_COMMAND_DONE) {
3985                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3986                         memcpy(ret_val, &v, 4);
3987                         break;
3988                 }
3989         }
3990         if (j >= NVRAM_TIMEOUT_COUNT)
3991                 return -EBUSY;
3992
3993         return 0;
3994 }
3995
3996
3997 static int
3998 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3999 {
4000         u32 cmd;
4001         __be32 val32;
4002         int j;
4003
4004         /* Build the command word. */
4005         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4006
4007         /* Calculate an offset of a buffered flash, not needed for 5709. */
4008         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4009                 offset = ((offset / bp->flash_info->page_size) <<
4010                           bp->flash_info->page_bits) +
4011                          (offset % bp->flash_info->page_size);
4012         }
4013
4014         /* Need to clear DONE bit separately. */
4015         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4016
4017         memcpy(&val32, val, 4);
4018
4019         /* Write the data. */
4020         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4021
4022         /* Address of the NVRAM to write to. */
4023         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4024
4025         /* Issue the write command. */
4026         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4027
4028         /* Wait for completion. */
4029         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4030                 udelay(5);
4031
4032                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4033                         break;
4034         }
4035         if (j >= NVRAM_TIMEOUT_COUNT)
4036                 return -EBUSY;
4037
4038         return 0;
4039 }
4040
/* Identify the flash/EEPROM part behind the NVRAM interface and record
 * its parameters in bp->flash_info and bp->flash_size.  On 5709 the
 * part is fixed (flash_5709); otherwise the strap bits in NVM_CFG1 are
 * matched against flash_table, reprogramming the interface if it has
 * not yet been reconfigured.  Returns 0 on success, -ENODEV if no
 * table entry matches, or the error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 distinguishes an already-reconfigured
	 * interface from one still running on power-up strapping.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured: match on the
		 * backup-strap bits of each entry's config1 value.
		 */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured: bit 23 selects which strap
		 * mask applies.
		 */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a matching entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hw config; fall back to
	 * the table entry's total size when it is zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4123
4124 static int
4125 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4126                 int buf_size)
4127 {
4128         int rc = 0;
4129         u32 cmd_flags, offset32, len32, extra;
4130
4131         if (buf_size == 0)
4132                 return 0;
4133
4134         /* Request access to the flash interface. */
4135         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4136                 return rc;
4137
4138         /* Enable access to flash interface */
4139         bnx2_enable_nvram_access(bp);
4140
4141         len32 = buf_size;
4142         offset32 = offset;
4143         extra = 0;
4144
4145         cmd_flags = 0;
4146
4147         if (offset32 & 3) {
4148                 u8 buf[4];
4149                 u32 pre_len;
4150
4151                 offset32 &= ~3;
4152                 pre_len = 4 - (offset & 3);
4153
4154                 if (pre_len >= len32) {
4155                         pre_len = len32;
4156                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4157                                     BNX2_NVM_COMMAND_LAST;
4158                 }
4159                 else {
4160                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4161                 }
4162
4163                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4164
4165                 if (rc)
4166                         return rc;
4167
4168                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4169
4170                 offset32 += 4;
4171                 ret_buf += pre_len;
4172                 len32 -= pre_len;
4173         }
4174         if (len32 & 3) {
4175                 extra = 4 - (len32 & 3);
4176                 len32 = (len32 + 4) & ~3;
4177         }
4178
4179         if (len32 == 4) {
4180                 u8 buf[4];
4181
4182                 if (cmd_flags)
4183                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4184                 else
4185                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4186                                     BNX2_NVM_COMMAND_LAST;
4187
4188                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4189
4190                 memcpy(ret_buf, buf, 4 - extra);
4191         }
4192         else if (len32 > 0) {
4193                 u8 buf[4];
4194
4195                 /* Read the first word. */
4196                 if (cmd_flags)
4197                         cmd_flags = 0;
4198                 else
4199                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4200
4201                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4202
4203                 /* Advance to the next dword. */
4204                 offset32 += 4;
4205                 ret_buf += 4;
4206                 len32 -= 4;
4207
4208                 while (len32 > 4 && rc == 0) {
4209                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4210
4211                         /* Advance to the next dword. */
4212                         offset32 += 4;
4213                         ret_buf += 4;
4214                         len32 -= 4;
4215                 }
4216
4217                 if (rc)
4218                         return rc;
4219
4220                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4221                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4222
4223                 memcpy(ret_buf, buf, 4 - extra);
4224         }
4225
4226         /* Disable access to flash interface */
4227         bnx2_disable_nvram_access(bp);
4228
4229         bnx2_release_nvram_lock(bp);
4230
4231         return rc;
4232 }
4233
/* Write buf_size bytes from data_buf to NVRAM at byte offset "offset".
 *
 * Unaligned head/tail bytes are handled by reading the surrounding dwords
 * first and merging them into a kmalloc'd aligned copy of the data.  For
 * non-buffered flash parts the write proceeds one page at a time:
 * read the whole page, erase it, then write back the preserved leading
 * bytes, the new data, and the preserved trailing bytes.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: round down to a dword boundary and fetch the
	 * existing dword so its leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: round the length up and fetch the last dword so
	 * its trailing bytes can be preserved. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned shadow copy: preserved head + new data +
	 * preserved tail. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a whole-page scratch buffer for the
	 * read-erase-rewrite cycle.  NOTE(review): 264 looks sized for the
	 * largest page_size of the supported parts — confirm against the
	 * flash table. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Write one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers can be freed
	 * unconditionally. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4413
4414 static void
4415 bnx2_init_fw_cap(struct bnx2 *bp)
4416 {
4417         u32 val, sig = 0;
4418
4419         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4420         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4421
4422         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4423                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4424
4425         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4426         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4427                 return;
4428
4429         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4430                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4431                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4432         }
4433
4434         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4435             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4436                 u32 link;
4437
4438                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4439
4440                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4441                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4442                         bp->phy_port = PORT_FIBRE;
4443                 else
4444                         bp->phy_port = PORT_TP;
4445
4446                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4447                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4448         }
4449
4450         if (netif_running(bp->dev) && sig)
4451                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4452 }
4453
/* Switch the GRC window to separate-window mode and point windows 2 and 3
 * at the MSI-X vector table and pending-bit array so they are reachable
 * through the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4462
/* Perform a coordinated soft reset of the chip.
 *
 * The sequence is: quiesce DMA, handshake with the bootcode (reset_code
 * selects the firmware wait message), issue the reset (via MISC_COMMAND on
 * 5709, via PCICFG_MISC_CONFIG on older chips), then verify endianness,
 * wait for firmware init, and re-apply per-chip workarounds.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV on
 * an endian-configuration failure, or an error from the firmware sync /
 * rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset through the MISC command register; the
		 * read-back flushes the posted write. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register-window / word-swap config via PCI
		 * config space (the register BAR just got reset). */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips: request a core reset via PCICFG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the GRC windows; restore MSI-X mapping. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4568
/* Program the chip after a reset: DMA configuration, context memory,
 * on-chip CPUs/firmware, MAC address, MTU, host-coalescing parameters and
 * per-vector status blocks, then hand off to the firmware and enable the
 * remaining hardware blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or the
 * final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Byte/word swap settings plus DMA read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented tuning bits — kept as-is. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the enable-relaxed-ordering bit. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are computed from at least the standard MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Reset the cached status-block indices for every vector. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip counts and tick timers (high halfword is
	 * the "during interrupt" value). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block config for the additional MSI-X vectors
	 * (vector 0 was configured above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake: tell the firmware initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	/* Enable the remaining blocks; the read-back flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4789
4790 static void
4791 bnx2_clear_ring_states(struct bnx2 *bp)
4792 {
4793         struct bnx2_napi *bnapi;
4794         struct bnx2_tx_ring_info *txr;
4795         struct bnx2_rx_ring_info *rxr;
4796         int i;
4797
4798         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4799                 bnapi = &bp->bnx2_napi[i];
4800                 txr = &bnapi->tx_ring;
4801                 rxr = &bnapi->rx_ring;
4802
4803                 txr->tx_cons = 0;
4804                 txr->hw_tx_cons = 0;
4805                 rxr->rx_prod_bseq = 0;
4806                 rxr->rx_prod = 0;
4807                 rxr->rx_cons = 0;
4808                 rxr->rx_pg_prod = 0;
4809                 rxr->rx_pg_cons = 0;
4810         }
4811 }
4812
4813 static void
4814 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4815 {
4816         u32 val, offset0, offset1, offset2, offset3;
4817         u32 cid_addr = GET_CID_ADDR(cid);
4818
4819         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4820                 offset0 = BNX2_L2CTX_TYPE_XI;
4821                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4822                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4823                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4824         } else {
4825                 offset0 = BNX2_L2CTX_TYPE;
4826                 offset1 = BNX2_L2CTX_CMD_TYPE;
4827                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4828                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4829         }
4830         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4831         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4832
4833         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4834         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4835
4836         val = (u64) txr->tx_desc_mapping >> 32;
4837         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4838
4839         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4840         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4841 }
4842
4843 static void
4844 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4845 {
4846         struct tx_bd *txbd;
4847         u32 cid = TX_CID;
4848         struct bnx2_napi *bnapi;
4849         struct bnx2_tx_ring_info *txr;
4850
4851         bnapi = &bp->bnx2_napi[ring_num];
4852         txr = &bnapi->tx_ring;
4853
4854         if (ring_num == 0)
4855                 cid = TX_CID;
4856         else
4857                 cid = TX_TSS_CID + ring_num - 1;
4858
4859         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4860
4861         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4862
4863         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4864         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4865
4866         txr->tx_prod = 0;
4867         txr->tx_prod_bseq = 0;
4868
4869         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4870         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4871
4872         bnx2_init_tx_context(bp, cid, txr);
4873 }
4874
4875 static void
4876 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4877                      int num_rings)
4878 {
4879         int i;
4880         struct rx_bd *rxbd;
4881
4882         for (i = 0; i < num_rings; i++) {
4883                 int j;
4884
4885                 rxbd = &rx_ring[i][0];
4886                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4887                         rxbd->rx_bd_len = buf_size;
4888                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4889                 }
4890                 if (i == (num_rings - 1))
4891                         j = 0;
4892                 else
4893                         j = i + 1;
4894                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4895                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4896         }
4897 }
4898
/* Initialize the RX ring (and optional page ring) for the given vector:
 * build the BD chains, program the RX context, fill the rings with
 * buffers, and ring the producer doorbells. */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX context; extra RSS rings get their
	 * own context IDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page-buffer size 0 disables the page ring until configured. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo/page ring: build the page BD chain and program its
		 * buffer sizes and DMA base into the context. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA base of the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; stop early (without error) on allocation
	 * failure and publish however many buffers were posted. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Fill the normal RX ring with skbs, same best-effort policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Cache the mailbox doorbell addresses for the fast path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Ring the doorbells with the posted producer indices. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4978
/* Initialize every tx and rx ring and, when multiple rx rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Likewise disable RSS while the rx rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte entry at a time,
		 * flushing each group of 4 entries to the RX processor
		 * scratch area as a big-endian 32-bit word.  Each entry
		 * is i % (num_rx_rings - 1), cycling over the non-default
		 * rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5023
5024 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5025 {
5026         u32 max, num_rings = 1;
5027
5028         while (ring_size > MAX_RX_DESC_CNT) {
5029                 ring_size -= MAX_RX_DESC_CNT;
5030                 num_rings++;
5031         }
5032         /* round to next power of 2 */
5033         max = max_size;
5034         while ((max & num_rings) == 0)
5035                 max >>= 1;
5036
5037         if (num_rings != max)
5038                 max <<= 1;
5039
5040         return max;
5041 }
5042
/* Compute all rx buffer and ring size parameters for the current MTU.
 *
 * When the buffer including skb overhead would not fit in one page
 * (jumbo MTU) and the chip supports it, frame data beyond the header
 * is carried in a separate page ring and the skb buffer itself is
 * shrunk to the copy threshold.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint, including alignment slack and the
	 * trailing skb_shared_info.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame; the 40 presumably accounts for
		 * headers kept in the header buffer -- TODO confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Header buffer only; the rest of the frame goes in pages. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5081
/* Unmap and free every pending tx skb on all tx rings. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;

			/* Only the first BD of a packet holds the skb. */
			if (skb == NULL) {
				j++;
				continue;
			}

			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			/* Skip the frag BDs belonging to this skb. */
			j += skb_shinfo(skb)->nr_frags + 1;
			dev_kfree_skb(skb);
		}
	}
}
5113
/* Unmap and free every rx skb and rx page on all rx rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this returns (not continue), abandoning
		 * all later rings -- presumably the rings are allocated
		 * together so a NULL here implies the rest are NULL too;
		 * confirm against the allocation path.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release any pages posted to the jumbo page ring. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5147
/* Free all driver-owned tx and rx buffers (used around chip resets). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5154
5155 static int
5156 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5157 {
5158         int rc;
5159
5160         rc = bnx2_reset_chip(bp, reset_code);
5161         bnx2_free_skbs(bp);
5162         if (rc)
5163                 return rc;
5164
5165         if ((rc = bnx2_init_chip(bp)) != 0)
5166                 return rc;
5167
5168         bnx2_init_all_rings(bp);
5169         return 0;
5170 }
5171
/* Full NIC (re)initialization: reset the chip and rings, then bring up
 * the PHY and link under phy_lock.  @reset_phy selects whether the PHY
 * itself is also reset.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* Presumably syncs link state owned by the remote PHY firmware
	 * -- confirm against bnx2_remote_phy_event().
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5188
5189 static int
5190 bnx2_shutdown_chip(struct bnx2 *bp)
5191 {
5192         u32 reset_code;
5193
5194         if (bp->flags & BNX2_FLAG_NO_WOL)
5195                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5196         else if (bp->wol)
5197                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5198         else
5199                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5200
5201         return bnx2_reset_chip(bp, reset_code);
5202 }
5203
/* Register self-test: walk a table of registers, checking that the
 * read/write bits (rw_mask) accept 0 and all-ones, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * value is restored after each register.  Returns 0 on success or
 * -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;	/* BNX2_FL_NOT_5709: skip entry on 5709 */
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;	/* bits that must be writable */
		u32   ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* terminator */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		/* After writing 0, all writable bits must read back 0. */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be unchanged by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		/* After writing all-ones, all writable bits must be set. */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5374
5375 static int
5376 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5377 {
5378         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5379                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5380         int i;
5381
5382         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5383                 u32 offset;
5384
5385                 for (offset = 0; offset < size; offset += 4) {
5386
5387                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5388
5389                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5390                                 test_pattern[i]) {
5391                                 return -ENODEV;
5392                         }
5393                 }
5394         }
5395         return 0;
5396 }
5397
5398 static int
5399 bnx2_test_memory(struct bnx2 *bp)
5400 {
5401         int ret = 0;
5402         int i;
5403         static struct mem_entry {
5404                 u32   offset;
5405                 u32   len;
5406         } mem_tbl_5706[] = {
5407                 { 0x60000,  0x4000 },
5408                 { 0xa0000,  0x3000 },
5409                 { 0xe0000,  0x4000 },
5410                 { 0x120000, 0x4000 },
5411                 { 0x1a0000, 0x4000 },
5412                 { 0x160000, 0x4000 },
5413                 { 0xffffffff, 0    },
5414         },
5415         mem_tbl_5709[] = {
5416                 { 0x60000,  0x4000 },
5417                 { 0xa0000,  0x3000 },
5418                 { 0xe0000,  0x4000 },
5419                 { 0x120000, 0x4000 },
5420                 { 0x1a0000, 0x4000 },
5421                 { 0xffffffff, 0    },
5422         };
5423         struct mem_entry *mem_tbl;
5424
5425         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5426                 mem_tbl = mem_tbl_5709;
5427         else
5428                 mem_tbl = mem_tbl_5706;
5429
5430         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5431                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5432                         mem_tbl[i].len)) != 0) {
5433                         return ret;
5434                 }
5435         }
5436
5437         return ret;
5438 }
5439
5440 #define BNX2_MAC_LOOPBACK       0
5441 #define BNX2_PHY_LOOPBACK       1
5442
/* Send one frame through the chip in MAC or PHY loopback mode and
 * verify it comes back intact on the rx ring.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM / -EIO on
 * setup failure, and -ENODEV when the frame was not received correctly.
 * (PHY loopback is skipped -- reported as pass -- when the PHY is
 * remotely managed.)
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): txr/rxr are re-derived from the same napi they
	 * were initialized from above -- redundant but harmless.
	 */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our own MAC as destination, patterned
	 * payload bytes (i & 0xff) after the 14-byte Ethernet header.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_maps[0];

	/* Force a coalesce event so the rx consumer index is current
	 * before we snapshot it.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Queue the frame as a single tx BD and ring the doorbell. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the frame back. */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The tx BD must have been consumed... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr precedes the frame data in the rx buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any rx error flag fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (less the 4-byte CRC)... */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* ...and the payload pattern must be intact. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5573
5574 #define BNX2_MAC_LOOPBACK_FAILED        1
5575 #define BNX2_PHY_LOOPBACK_FAILED        2
5576 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5577                                          BNX2_PHY_LOOPBACK_FAILED)
5578
/* Run the MAC and PHY loopback self-tests after a fresh chip reset and
 * PHY init.  Returns a bitmask of BNX2_MAC_LOOPBACK_FAILED and
 * BNX2_PHY_LOOPBACK_FAILED (0 = all passed); both bits are reported
 * failed when the interface is down.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
5597
5598 #define NVRAM_SIZE 0x200
5599 #define CRC32_RESIDUAL 0xdebb20e3
5600
/* NVRAM self-test: check the magic word at offset 0, then verify the
 * CRC32 residual over both 256-byte halves of the block at offset
 * 0x100.  Returns 0 on success or a negative errno.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* Each 0x100-byte half must checksum to the CRC32 residual. */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
5635
5636 static int
5637 bnx2_test_link(struct bnx2 *bp)
5638 {
5639         u32 bmsr;
5640
5641         if (!netif_running(bp->dev))
5642                 return -ENODEV;
5643
5644         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5645                 if (bp->link_up)
5646                         return 0;
5647                 return -ENODEV;
5648         }
5649         spin_lock_bh(&bp->phy_lock);
5650         bnx2_enable_bmsr1(bp);
5651         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5652         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5653         bnx2_disable_bmsr1(bp);
5654         spin_unlock_bh(&bp->phy_lock);
5655
5656         if (bmsr & BMSR_LSTATUS) {
5657                 return 0;
5658         }
5659         return -ENODEV;
5660 }
5661
/* Interrupt self-test: force a coalesce event and poll up to ~100 ms
 * for the status block index to advance.  Returns 0 when an interrupt
 * event was observed, -ENODEV otherwise.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* Snapshot the current status index before triggering. */
	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	/* Poll in 10 ms steps for the index to change. */
	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
5691
/* Determining link for parallel detection.
 *
 * Probes the 5706S PHY shadow/expansion registers to decide whether a
 * non-autonegotiating link partner is present.  Returns 1 when a
 * parallel-detect link appears usable, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Double read -- presumably the first read returns latched
	 * state; the same pattern is used elsewhere in this driver.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5723
/* Periodic SerDes state machine for the 5706S, driven by bnx2_timer().
 *
 * Handles parallel detection: when autoneg finds no partner but a link
 * is detectable, force 1000/full; when the partner later advertises
 * autoneg, re-enable it.  Also checks for loss of sync to force the
 * link down/up as needed.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still being given time. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* No autoneg partner but link detectable:
			 * force 1000/full (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Partner now advertises autoneg: switch back to it. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the latched AN debug register. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Sync lost: force link down once, then let
			 * bnx2_set_link() re-evaluate on later ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5785
/* Periodic SerDes state machine for the 5708, driven by bnx2_timer().
 *
 * While autonegotiating without link, alternate between forced 2.5G
 * and normal autoneg to find a partner.  Not used when the PHY is
 * remotely managed or not 2.5G-capable.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still giving the current autoneg attempt time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G with a shorter retry timeout. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Fall back to autoneg for the next 2 ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
5818
static void
bnx2_timer(unsigned long data)
{
	/* Periodic driver timer (re-armed every bp->current_interval):
	 * sends the firmware heartbeat, applies hardware stats/MSI
	 * workarounds and runs the SerDes link state machines.
	 */
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are blocked (reset in progress); only re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (without one-shot mode) can miss events; check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-dropped rx packets are kept in the stats block so
	 * bnx2_get_stats() can fold them into rx_missed_errors.
	 */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5854
5855 static int
5856 bnx2_request_irq(struct bnx2 *bp)
5857 {
5858         unsigned long flags;
5859         struct bnx2_irq *irq;
5860         int rc = 0, i;
5861
5862         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5863                 flags = 0;
5864         else
5865                 flags = IRQF_SHARED;
5866
5867         for (i = 0; i < bp->irq_nvecs; i++) {
5868                 irq = &bp->irq_tbl[i];
5869                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5870                                  &bp->bnx2_napi[i]);
5871                 if (rc)
5872                         break;
5873                 irq->requested = 1;
5874         }
5875         return rc;
5876 }
5877
5878 static void
5879 bnx2_free_irq(struct bnx2 *bp)
5880 {
5881         struct bnx2_irq *irq;
5882         int i;
5883
5884         for (i = 0; i < bp->irq_nvecs; i++) {
5885                 irq = &bp->irq_tbl[i];
5886                 if (irq->requested)
5887                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5888                 irq->requested = 0;
5889         }
5890         if (bp->flags & BNX2_FLAG_USING_MSI)
5891                 pci_disable_msi(bp->pdev);
5892         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5893                 pci_disable_msix(bp->pdev);
5894
5895         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5896 }
5897
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	/* Try to switch the device to MSI-X with msix_vecs vectors.  On
	 * any failure this returns silently and the caller falls back
	 * to MSI or INTx.
	 */
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Map the MSI-X table and PBA through the GRC windows. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All-or-nothing: always ask for the full hardware vector count
	 * even if fewer (msix_vecs) will actually be used.
	 */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
5928
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	/* Choose the interrupt mode (MSI-X, then MSI, then INTx) and
	 * size the tx/rx ring counts from the vectors obtained.
	 * dis_msi forces legacy INTx (used after a failed MSI test in
	 * bnx2_open()).
	 */
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the rx ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Start from a single legacy INTx vector ... */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	/* ... then upgrade to MSI-X or MSI when capable and allowed. */
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* tx queue count must be a power of two. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
5962
/* Called with rtnl_lock */
/* ndo_open: bring the device out of low power, allocate rings and
 * IRQ vectors, initialize the chip and start the tx queues.  If MSI
 * was selected but fails the self-test, fall back to INTx and fully
 * reinitialize.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx and redo the chip init for it. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind; the free routines tolerate partially-done setup. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6039
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Workqueue handler (scheduled from bnx2_tx_timeout()): stop
	 * traffic, fully reinitialize the chip and restart.
	 */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* Hold interrupt handling off until re-enabled. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
6055
6056 static void
6057 bnx2_tx_timeout(struct net_device *dev)
6058 {
6059         struct bnx2 *bp = netdev_priv(dev);
6060
6061         /* This allows the netif to be shutdown gracefully before resetting */
6062         schedule_work(&bp->reset_task);
6063 }
6064
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* Install the new VLAN group and reprogram the rx filters while
	 * traffic is quiesced.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	/* Notify the firmware of the changed VLAN tag handling. */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6082
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Hot-path transmit: DMA-map the skb, build one tx BD for the
	 * linear part plus one per page fragment, then ring the
	 * producer doorbell.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped before it gets this
	 * full (see the wake threshold check at the bottom).
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag travels in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO: encode MSS, TCP option length and (for IPv6) the
		 * transport header offset into the BD fields.
		 */
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Extra IPv6 header bytes (extension headers)
			 * beyond the basic ipv6hdr, in units of 8 bytes
			 * after the shift below.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset is split across three BD bit
				 * fields (bits 0-1, 2-3, 4).
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				/* Combined IP + TCP option length in
				 * 32-bit words, bits 8+.
				 */
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		/* Mapping failed; drop silently per xmit convention. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_maps[0];

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD covers the linear data and carries the START flag. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i + 1];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Last BD of the chain gets the END flag. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when it cannot hold a maximally-fragmented
	 * skb; re-wake immediately if tx completions already freed
	 * enough room (avoids a race with bnx2_tx_int()).
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6224
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	/* ndo_stop: quiesce everything in the reverse of bnx2_open()'s
	 * order, then drop the chip into low power.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure a queued reset_task cannot run during/after teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6245
/* Fold a 64-bit hardware statistics counter, stored as two 32-bit
 * fields <ctr>_hi/<ctr>_lo, into an unsigned long.  On 64-bit hosts
 * the full 64-bit value is reported; on 32-bit hosts only the low
 * word is used.  The expansions are fully parenthesized so the
 * macros compose safely inside larger arithmetic expressions
 * (the old GET_NET_STATS64 ended in an unparenthesized "+ lo" and
 * would bind wrongly next to higher-precedence operators).
 */
#define GET_NET_STATS64(ctr)                                    \
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
	 ((unsigned long) (ctr##_lo)))

#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6258
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	/* ndo_get_stats: translate the chip's hardware statistics block
	 * into struct net_device_stats.  If the stats block was never
	 * allocated, the (zeroed) dev->stats is returned untouched.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual rx error classes. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors reported as 0 on 5706 and 5708 A0;
	 * NOTE(review): presumably the counter is unreliable on those
	 * chips -- confirm against the hardware errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames the bootcode firmware dropped (stat_FwRxDrop
	 * is refreshed from the chip in bnx2_timer()).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6334
6335 /* All ethtool functions called with rtnl_lock */
6336
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool GSET: report supported modes, current advertising
	 * mask and -- when the carrier is up -- the negotiated
	 * speed/duplex.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A firmware-managed remote PHY can drive either media type. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock guards the link-state fields snapshotted below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Link down: speed and duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6395
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool SSET: validate the requested link settings on local
	 * copies and commit them to bp (and the PHY, if running) only
	 * after every check has passed.  Returns 0 or -EINVAL.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only a remote PHY can switch between TP and fibre ports. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable PHY and a fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise all speeds the port
			 * supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		/* Forced 1G/2.5G is rejected on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6490
6491 static void
6492 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6493 {
6494         struct bnx2 *bp = netdev_priv(dev);
6495
6496         strcpy(info->driver, DRV_MODULE_NAME);
6497         strcpy(info->version, DRV_MODULE_VERSION);
6498         strcpy(info->bus_info, pci_name(bp->pdev));
6499         strcpy(info->fw_version, bp->fw_version);
6500 }
6501
/* Size in bytes of the ethtool register dump produced by
 * bnx2_get_regs().
 */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6509
6510 static void
6511 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6512 {
6513         u32 *p = _p, i, offset;
6514         u8 *orig_p = _p;
6515         struct bnx2 *bp = netdev_priv(dev);
6516         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6517                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6518                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6519                                  0x1040, 0x1048, 0x1080, 0x10a4,
6520                                  0x1400, 0x1490, 0x1498, 0x14f0,
6521                                  0x1500, 0x155c, 0x1580, 0x15dc,
6522                                  0x1600, 0x1658, 0x1680, 0x16d8,
6523                                  0x1800, 0x1820, 0x1840, 0x1854,
6524                                  0x1880, 0x1894, 0x1900, 0x1984,
6525                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6526                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6527                                  0x2000, 0x2030, 0x23c0, 0x2400,
6528                                  0x2800, 0x2820, 0x2830, 0x2850,
6529                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6530                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6531                                  0x4080, 0x4090, 0x43c0, 0x4458,
6532                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6533                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6534                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6535                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6536                                  0x6800, 0x6848, 0x684c, 0x6860,
6537                                  0x6888, 0x6910, 0x8000 };
6538
6539         regs->version = 0;
6540
6541         memset(p, 0, BNX2_REGDUMP_LEN);
6542
6543         if (!netif_running(bp->dev))
6544                 return;
6545
6546         i = 0;
6547         offset = reg_boundaries[0];
6548         p += offset;
6549         while (offset < BNX2_REGDUMP_LEN) {
6550                 *p++ = REG_RD(bp, offset);
6551                 offset += 4;
6552                 if (offset == reg_boundaries[i + 1]) {
6553                         offset = reg_boundaries[i + 2];
6554                         p = (u32 *) (orig_p + offset);
6555                         i += 2;
6556                 }
6557         }
6558 }
6559
6560 static void
6561 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6562 {
6563         struct bnx2 *bp = netdev_priv(dev);
6564
6565         if (bp->flags & BNX2_FLAG_NO_WOL) {
6566                 wol->supported = 0;
6567                 wol->wolopts = 0;
6568         }
6569         else {
6570                 wol->supported = WAKE_MAGIC;
6571                 if (bp->wol)
6572                         wol->wolopts = WAKE_MAGIC;
6573                 else
6574                         wol->wolopts = 0;
6575         }
6576         memset(&wol->sopass, 0, sizeof(wol->sopass));
6577 }
6578
6579 static int
6580 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6581 {
6582         struct bnx2 *bp = netdev_priv(dev);
6583
6584         if (wol->wolopts & ~WAKE_MAGIC)
6585                 return -EINVAL;
6586
6587         if (wol->wolopts & WAKE_MAGIC) {
6588                 if (bp->flags & BNX2_FLAG_NO_WOL)
6589                         return -EINVAL;
6590
6591                 bp->wol = 1;
6592         }
6593         else {
6594                 bp->wol = 0;
6595         }
6596         return 0;
6597 }
6598
static int
bnx2_nway_reset(struct net_device *dev)
{
	/* ethtool -r: restart autonegotiation.  Valid only while the
	 * device is running with autoneg enabled.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* A remote PHY is restarted through the firmware interface. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		/* Lock dropped so the sleep does not hold phy_lock. */
		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout state machine. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6644
6645 static int
6646 bnx2_get_eeprom_len(struct net_device *dev)
6647 {
6648         struct bnx2 *bp = netdev_priv(dev);
6649
6650         if (bp->flash_info == NULL)
6651                 return 0;
6652
6653         return (int) bp->flash_size;
6654 }
6655
6656 static int
6657 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6658                 u8 *eebuf)
6659 {
6660         struct bnx2 *bp = netdev_priv(dev);
6661         int rc;
6662
6663         if (!netif_running(dev))
6664                 return -EAGAIN;
6665
6666         /* parameters already validated in ethtool_get_eeprom */
6667
6668         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6669
6670         return rc;
6671 }
6672
6673 static int
6674 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6675                 u8 *eebuf)
6676 {
6677         struct bnx2 *bp = netdev_priv(dev);
6678         int rc;
6679
6680         if (!netif_running(dev))
6681                 return -EAGAIN;
6682
6683         /* parameters already validated in ethtool_set_eeprom */
6684
6685         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6686
6687         return rc;
6688 }
6689
6690 static int
6691 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6692 {
6693         struct bnx2 *bp = netdev_priv(dev);
6694
6695         memset(coal, 0, sizeof(struct ethtool_coalesce));
6696
6697         coal->rx_coalesce_usecs = bp->rx_ticks;
6698         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6699         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6700         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6701
6702         coal->tx_coalesce_usecs = bp->tx_ticks;
6703         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6704         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6705         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6706
6707         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6708
6709         return 0;
6710 }
6711
6712 static int
6713 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6714 {
6715         struct bnx2 *bp = netdev_priv(dev);
6716
6717         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6718         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6719
6720         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6721         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6722
6723         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6724         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6725
6726         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6727         if (bp->rx_quick_cons_trip_int > 0xff)
6728                 bp->rx_quick_cons_trip_int = 0xff;
6729
6730         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6731         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6732
6733         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6734         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6735
6736         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6737         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6738
6739         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6740         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6741                 0xff;
6742
6743         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6744         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6745                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6746                         bp->stats_ticks = USEC_PER_SEC;
6747         }
6748         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6749                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6750         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6751
6752         if (netif_running(bp->dev)) {
6753                 bnx2_netif_stop(bp);
6754                 bnx2_init_nic(bp, 0);
6755                 bnx2_netif_start(bp);
6756         }
6757
6758         return 0;
6759 }
6760
6761 static void
6762 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6763 {
6764         struct bnx2 *bp = netdev_priv(dev);
6765
6766         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6767         ering->rx_mini_max_pending = 0;
6768         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6769
6770         ering->rx_pending = bp->rx_ring_size;
6771         ering->rx_mini_pending = 0;
6772         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6773
6774         ering->tx_max_pending = MAX_TX_DESC_CNT;
6775         ering->tx_pending = bp->tx_ring_size;
6776 }
6777
6778 static int
6779 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6780 {
6781         if (netif_running(bp->dev)) {
6782                 bnx2_netif_stop(bp);
6783                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6784                 bnx2_free_skbs(bp);
6785                 bnx2_free_mem(bp);
6786         }
6787
6788         bnx2_set_rx_ring_size(bp, rx);
6789         bp->tx_ring_size = tx;
6790
6791         if (netif_running(bp->dev)) {
6792                 int rc;
6793
6794                 rc = bnx2_alloc_mem(bp);
6795                 if (rc)
6796                         return rc;
6797                 bnx2_init_nic(bp, 0);
6798                 bnx2_netif_start(bp);
6799         }
6800         return 0;
6801 }
6802
6803 static int
6804 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6805 {
6806         struct bnx2 *bp = netdev_priv(dev);
6807         int rc;
6808
6809         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6810                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6811                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6812
6813                 return -EINVAL;
6814         }
6815         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6816         return rc;
6817 }
6818
6819 static void
6820 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6821 {
6822         struct bnx2 *bp = netdev_priv(dev);
6823
6824         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6825         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6826         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6827 }
6828
6829 static int
6830 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6831 {
6832         struct bnx2 *bp = netdev_priv(dev);
6833
6834         bp->req_flow_ctrl = 0;
6835         if (epause->rx_pause)
6836                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6837         if (epause->tx_pause)
6838                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6839
6840         if (epause->autoneg) {
6841                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6842         }
6843         else {
6844                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6845         }
6846
6847         if (netif_running(dev)) {
6848                 spin_lock_bh(&bp->phy_lock);
6849                 bnx2_setup_phy(bp, bp->phy_port);
6850                 spin_unlock_bh(&bp->phy_lock);
6851         }
6852
6853         return 0;
6854 }
6855
6856 static u32
6857 bnx2_get_rx_csum(struct net_device *dev)
6858 {
6859         struct bnx2 *bp = netdev_priv(dev);
6860
6861         return bp->rx_csum;
6862 }
6863
6864 static int
6865 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6866 {
6867         struct bnx2 *bp = netdev_priv(dev);
6868
6869         bp->rx_csum = data;
6870         return 0;
6871 }
6872
6873 static int
6874 bnx2_set_tso(struct net_device *dev, u32 data)
6875 {
6876         struct bnx2 *bp = netdev_priv(dev);
6877
6878         if (data) {
6879                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6880                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6881                         dev->features |= NETIF_F_TSO6;
6882         } else
6883                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6884                                    NETIF_F_TSO_ECN);
6885         return 0;
6886 }
6887
#define BNX2_NUM_STATS 46

/* ethtool statistics names reported for ETH_SS_STATS.  Order must
 * match bnx2_stats_offset_arr and the per-chip stats length arrays.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6940
/* Convert a byte offset into struct statistics_block to a u32 index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-counter u32 offset into the hardware statistics block, in the
 * same order as bnx2_stats_str_arr.  For 64-bit counters the offset
 * points at the high word; the low word follows immediately.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6991
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes, same order as bnx2_stats_str_arr:
 * 8 = 64-bit counter (hi/lo u32 pair), 4 = 32-bit counter,
 * 0 = counter skipped on this chip (reported as 0).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Same as above for 5708-class chips, which do not skip the carrier
 * sense error counter.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7010
#define BNX2_NUM_TESTS 6

/* Names of the self-tests reported for ETH_SS_TEST; order matches the
 * buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7023
7024 static int
7025 bnx2_get_sset_count(struct net_device *dev, int sset)
7026 {
7027         switch (sset) {
7028         case ETH_SS_TEST:
7029                 return BNX2_NUM_TESTS;
7030         case ETH_SS_STATS:
7031                 return BNX2_NUM_STATS;
7032         default:
7033                 return -EOPNOTSUPP;
7034         }
7035 }
7036
/* ethtool self-test.  The offline tests (register, memory, loopback)
 * reset the chip into diagnostic mode and therefore disrupt traffic;
 * the online tests (nvram, interrupt, link) do not.  Each buf[] slot
 * corresponds to one entry in bnx2_tests_str_arr (non-zero = failed).
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* make sure the chip is powered up before touching registers */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback test returns a bitmask of failed loopback modes */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			/* restore normal operation after the diag reset */
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* device was not up; drop back to low power */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7095
7096 static void
7097 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7098 {
7099         switch (stringset) {
7100         case ETH_SS_STATS:
7101                 memcpy(buf, bnx2_stats_str_arr,
7102                         sizeof(bnx2_stats_str_arr));
7103                 break;
7104         case ETH_SS_TEST:
7105                 memcpy(buf, bnx2_tests_str_arr,
7106                         sizeof(bnx2_tests_str_arr));
7107                 break;
7108         }
7109 }
7110
7111 static void
7112 bnx2_get_ethtool_stats(struct net_device *dev,
7113                 struct ethtool_stats *stats, u64 *buf)
7114 {
7115         struct bnx2 *bp = netdev_priv(dev);
7116         int i;
7117         u32 *hw_stats = (u32 *) bp->stats_blk;
7118         u8 *stats_len_arr = NULL;
7119
7120         if (hw_stats == NULL) {
7121                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7122                 return;
7123         }
7124
7125         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7126             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7127             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7128             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7129                 stats_len_arr = bnx2_5706_stats_len_arr;
7130         else
7131                 stats_len_arr = bnx2_5708_stats_len_arr;
7132
7133         for (i = 0; i < BNX2_NUM_STATS; i++) {
7134                 if (stats_len_arr[i] == 0) {
7135                         /* skip this counter */
7136                         buf[i] = 0;
7137                         continue;
7138                 }
7139                 if (stats_len_arr[i] == 4) {
7140                         /* 4-byte counter */
7141                         buf[i] = (u64)
7142                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7143                         continue;
7144                 }
7145                 /* 8-byte counter */
7146                 buf[i] = (((u64) *(hw_stats +
7147                                         bnx2_stats_offset_arr[i])) << 32) +
7148                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7149         }
7150 }
7151
7152 static int
7153 bnx2_phys_id(struct net_device *dev, u32 data)
7154 {
7155         struct bnx2 *bp = netdev_priv(dev);
7156         int i;
7157         u32 save;
7158
7159         bnx2_set_power_state(bp, PCI_D0);
7160
7161         if (data == 0)
7162                 data = 2;
7163
7164         save = REG_RD(bp, BNX2_MISC_CFG);
7165         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7166
7167         for (i = 0; i < (data * 2); i++) {
7168                 if ((i % 2) == 0) {
7169                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7170                 }
7171                 else {
7172                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7173                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7174                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7175                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7176                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7177                                 BNX2_EMAC_LED_TRAFFIC);
7178                 }
7179                 msleep_interruptible(500);
7180                 if (signal_pending(current))
7181                         break;
7182         }
7183         REG_WR(bp, BNX2_EMAC_LED, 0);
7184         REG_WR(bp, BNX2_MISC_CFG, save);
7185
7186         if (!netif_running(dev))
7187                 bnx2_set_power_state(bp, PCI_D3hot);
7188
7189         return 0;
7190 }
7191
7192 static int
7193 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7194 {
7195         struct bnx2 *bp = netdev_priv(dev);
7196
7197         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7198                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7199         else
7200                 return (ethtool_op_set_tx_csum(dev, data));
7201 }
7202
/* ethtool operations exported by this driver */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings           = bnx2_get_settings,
	.set_settings           = bnx2_set_settings,
	.get_drvinfo            = bnx2_get_drvinfo,
	.get_regs_len           = bnx2_get_regs_len,
	.get_regs               = bnx2_get_regs,
	.get_wol                = bnx2_get_wol,
	.set_wol                = bnx2_set_wol,
	.nway_reset             = bnx2_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = bnx2_get_eeprom_len,
	.get_eeprom             = bnx2_get_eeprom,
	.set_eeprom             = bnx2_set_eeprom,
	.get_coalesce           = bnx2_get_coalesce,
	.set_coalesce           = bnx2_set_coalesce,
	.get_ringparam          = bnx2_get_ringparam,
	.set_ringparam          = bnx2_set_ringparam,
	.get_pauseparam         = bnx2_get_pauseparam,
	.set_pauseparam         = bnx2_set_pauseparam,
	.get_rx_csum            = bnx2_get_rx_csum,
	.set_rx_csum            = bnx2_set_rx_csum,
	.set_tx_csum            = bnx2_set_tx_csum,
	.set_sg                 = ethtool_op_set_sg,
	.set_tso                = bnx2_set_tso,
	.self_test              = bnx2_self_test,
	.get_strings            = bnx2_get_strings,
	.phys_id                = bnx2_phys_id,
	.get_ethtool_stats      = bnx2_get_ethtool_stats,
	.get_sset_count         = bnx2_get_sset_count,
};
7233
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * MII register access is refused when the PHY is controlled by remote
 * firmware, and requires the interface to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* writing PHY registers requires admin privileges */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7287
7288 /* Called with rtnl_lock */
7289 static int
7290 bnx2_change_mac_addr(struct net_device *dev, void *p)
7291 {
7292         struct sockaddr *addr = p;
7293         struct bnx2 *bp = netdev_priv(dev);
7294
7295         if (!is_valid_ether_addr(addr->sa_data))
7296                 return -EINVAL;
7297
7298         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7299         if (netif_running(dev))
7300                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7301
7302         return 0;
7303 }
7304
7305 /* Called with rtnl_lock */
7306 static int
7307 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7308 {
7309         struct bnx2 *bp = netdev_priv(dev);
7310
7311         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7312                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7313                 return -EINVAL;
7314
7315         dev->mtu = new_mtu;
7316         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7317 }
7318
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler for every vector with
 * its IRQ masked, so netconsole can make progress while normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		disable_irq(bp->irq_tbl[vec].vector);
		bnx2_interrupt(bp->irq_tbl[vec].vector, &bp->bnx2_napi[vec]);
		enable_irq(bp->irq_tbl[vec].vector);
	}
}
#endif
7333
/* Determine at probe time whether this 5709 port uses SerDes or copper
 * media.  The bond id field of BNX2_MISC_DUAL_MEDIA_CTRL identifies
 * single-media parts directly; otherwise the media strap bits (or a
 * software override of them) are decoded per PCI function.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* _C bond option: leave SERDES flag clear (presumably copper-only
	 * part -- confirm against the 5709 data sheet)
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* software override uses PHY_CTRL bits, otherwise the raw strap */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* the strap value -> media mapping differs between function 0 and
	 * the other functions of the device
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7371
/* Determine the PCI/PCI-X bus mode, speed and width at probe time and
 * record them in bp->flags / bp->bus_speed_mhz (used for reporting and
 * chip workarounds).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* in PCI-X mode, derive the bus speed from the detected
		 * PCI clock
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* conventional PCI: 66 MHz when M66EN is set, else 33 MHz */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7423
7424 static int __devinit
7425 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7426 {
7427         struct bnx2 *bp;
7428         unsigned long mem_len;
7429         int rc, i, j;
7430         u32 reg;
7431         u64 dma_mask, persist_dma_mask;
7432
7433         SET_NETDEV_DEV(dev, &pdev->dev);
7434         bp = netdev_priv(dev);
7435
7436         bp->flags = 0;
7437         bp->phy_flags = 0;
7438
7439         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7440         rc = pci_enable_device(pdev);
7441         if (rc) {
7442                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7443                 goto err_out;
7444         }
7445
7446         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7447                 dev_err(&pdev->dev,
7448                         "Cannot find PCI device base address, aborting.\n");
7449                 rc = -ENODEV;
7450                 goto err_out_disable;
7451         }
7452
7453         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7454         if (rc) {
7455                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7456                 goto err_out_disable;
7457         }
7458
7459         pci_set_master(pdev);
7460         pci_save_state(pdev);
7461
7462         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7463         if (bp->pm_cap == 0) {
7464                 dev_err(&pdev->dev,
7465                         "Cannot find power management capability, aborting.\n");
7466                 rc = -EIO;
7467                 goto err_out_release;
7468         }
7469
7470         bp->dev = dev;
7471         bp->pdev = pdev;
7472
7473         spin_lock_init(&bp->phy_lock);
7474         spin_lock_init(&bp->indirect_lock);
7475         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7476
7477         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7478         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7479         dev->mem_end = dev->mem_start + mem_len;
7480         dev->irq = pdev->irq;
7481
7482         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7483
7484         if (!bp->regview) {
7485                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7486                 rc = -ENOMEM;
7487                 goto err_out_release;
7488         }
7489
7490         /* Configure byte swap and enable write to the reg_window registers.
7491          * Rely on CPU to do target byte swapping on big endian systems
7492          * The chip's target access swapping will not swap all accesses
7493          */
7494         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7495                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7496                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7497
7498         bnx2_set_power_state(bp, PCI_D0);
7499
7500         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7501
7502         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7503                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7504                         dev_err(&pdev->dev,
7505                                 "Cannot find PCIE capability, aborting.\n");
7506                         rc = -EIO;
7507                         goto err_out_unmap;
7508                 }
7509                 bp->flags |= BNX2_FLAG_PCIE;
7510                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7511                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7512         } else {
7513                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7514                 if (bp->pcix_cap == 0) {
7515                         dev_err(&pdev->dev,
7516                                 "Cannot find PCIX capability, aborting.\n");
7517                         rc = -EIO;
7518                         goto err_out_unmap;
7519                 }
7520         }
7521
7522         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7523                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7524                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7525         }
7526
7527         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7528                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7529                         bp->flags |= BNX2_FLAG_MSI_CAP;
7530         }
7531
7532         /* 5708 cannot support DMA addresses > 40-bit.  */
7533         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7534                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7535         else
7536                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7537
7538         /* Configure DMA attributes. */
7539         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7540                 dev->features |= NETIF_F_HIGHDMA;
7541                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7542                 if (rc) {
7543                         dev_err(&pdev->dev,
7544                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7545                         goto err_out_unmap;
7546                 }
7547         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7548                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7549                 goto err_out_unmap;
7550         }
7551
7552         if (!(bp->flags & BNX2_FLAG_PCIE))
7553                 bnx2_get_pci_speed(bp);
7554
7555         /* 5706A0 may falsely detect SERR and PERR. */
7556         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7557                 reg = REG_RD(bp, PCI_COMMAND);
7558                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7559                 REG_WR(bp, PCI_COMMAND, reg);
7560         }
7561         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7562                 !(bp->flags & BNX2_FLAG_PCIX)) {
7563
7564                 dev_err(&pdev->dev,
7565                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7566                 goto err_out_unmap;
7567         }
7568
7569         bnx2_init_nvram(bp);
7570
7571         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7572
7573         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7574             BNX2_SHM_HDR_SIGNATURE_SIG) {
7575                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7576
7577                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7578         } else
7579                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7580
7581         /* Get the permanent MAC address.  First we need to make sure the
7582          * firmware is actually running.
7583          */
7584         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7585
7586         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7587             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7588                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7589                 rc = -ENODEV;
7590                 goto err_out_unmap;
7591         }
7592
7593         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7594         for (i = 0, j = 0; i < 3; i++) {
7595                 u8 num, k, skip0;
7596
7597                 num = (u8) (reg >> (24 - (i * 8)));
7598                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7599                         if (num >= k || !skip0 || k == 1) {
7600                                 bp->fw_version[j++] = (num / k) + '0';
7601                                 skip0 = 0;
7602                         }
7603                 }
7604                 if (i != 2)
7605                         bp->fw_version[j++] = '.';
7606         }
7607         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7608         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7609                 bp->wol = 1;
7610
7611         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7612                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7613
7614                 for (i = 0; i < 30; i++) {
7615                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7616                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7617                                 break;
7618                         msleep(10);
7619                 }
7620         }
7621         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7622         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7623         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7624             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7625                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7626
7627                 bp->fw_version[j++] = ' ';
7628                 for (i = 0; i < 3; i++) {
7629                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7630                         reg = swab32(reg);
7631                         memcpy(&bp->fw_version[j], &reg, 4);
7632                         j += 4;
7633                 }
7634         }
7635
7636         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7637         bp->mac_addr[0] = (u8) (reg >> 8);
7638         bp->mac_addr[1] = (u8) reg;
7639
7640         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7641         bp->mac_addr[2] = (u8) (reg >> 24);
7642         bp->mac_addr[3] = (u8) (reg >> 16);
7643         bp->mac_addr[4] = (u8) (reg >> 8);
7644         bp->mac_addr[5] = (u8) reg;
7645
7646         bp->tx_ring_size = MAX_TX_DESC_CNT;
7647         bnx2_set_rx_ring_size(bp, 255);
7648
7649         bp->rx_csum = 1;
7650
7651         bp->tx_quick_cons_trip_int = 20;
7652         bp->tx_quick_cons_trip = 20;
7653         bp->tx_ticks_int = 80;
7654         bp->tx_ticks = 80;
7655
7656         bp->rx_quick_cons_trip_int = 6;
7657         bp->rx_quick_cons_trip = 6;
7658         bp->rx_ticks_int = 18;
7659         bp->rx_ticks = 18;
7660
7661         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7662
7663         bp->current_interval = BNX2_TIMER_INTERVAL;
7664
7665         bp->phy_addr = 1;
7666
7667         /* Disable WOL support if we are running on a SERDES chip. */
7668         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7669                 bnx2_get_5709_media(bp);
7670         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7671                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7672
7673         bp->phy_port = PORT_TP;
7674         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7675                 bp->phy_port = PORT_FIBRE;
7676                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7677                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7678                         bp->flags |= BNX2_FLAG_NO_WOL;
7679                         bp->wol = 0;
7680                 }
7681                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7682                         /* Don't do parallel detect on this board because of
7683                          * some board problems.  The link will not go down
7684                          * if we do parallel detect.
7685                          */
7686                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7687                             pdev->subsystem_device == 0x310c)
7688                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7689                 } else {
7690                         bp->phy_addr = 2;
7691                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7692                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7693                 }
7694         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7695                    CHIP_NUM(bp) == CHIP_NUM_5708)
7696                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7697         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7698                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7699                   CHIP_REV(bp) == CHIP_REV_Bx))
7700                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7701
7702         bnx2_init_fw_cap(bp);
7703
7704         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7705             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7706             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7707             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7708                 bp->flags |= BNX2_FLAG_NO_WOL;
7709                 bp->wol = 0;
7710         }
7711
7712         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7713                 bp->tx_quick_cons_trip_int =
7714                         bp->tx_quick_cons_trip;
7715                 bp->tx_ticks_int = bp->tx_ticks;
7716                 bp->rx_quick_cons_trip_int =
7717                         bp->rx_quick_cons_trip;
7718                 bp->rx_ticks_int = bp->rx_ticks;
7719                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7720                 bp->com_ticks_int = bp->com_ticks;
7721                 bp->cmd_ticks_int = bp->cmd_ticks;
7722         }
7723
7724         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7725          *
7726          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7727          * with byte enables disabled on the unused 32-bit word.  This is legal
7728          * but causes problems on the AMD 8132 which will eventually stop
7729          * responding after a while.
7730          *
7731          * AMD believes this incompatibility is unique to the 5706, and
7732          * prefers to locally disable MSI rather than globally disabling it.
7733          */
7734         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7735                 struct pci_dev *amd_8132 = NULL;
7736
7737                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7738                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7739                                                   amd_8132))) {
7740
7741                         if (amd_8132->revision >= 0x10 &&
7742                             amd_8132->revision <= 0x13) {
7743                                 disable_msi = 1;
7744                                 pci_dev_put(amd_8132);
7745                                 break;
7746                         }
7747                 }
7748         }
7749
7750         bnx2_set_default_link(bp);
7751         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7752
7753         init_timer(&bp->timer);
7754         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7755         bp->timer.data = (unsigned long) bp;
7756         bp->timer.function = bnx2_timer;
7757
7758         return 0;
7759
7760 err_out_unmap:
7761         if (bp->regview) {
7762                 iounmap(bp->regview);
7763                 bp->regview = NULL;
7764         }
7765
7766 err_out_release:
7767         pci_release_regions(pdev);
7768
7769 err_out_disable:
7770         pci_disable_device(pdev);
7771         pci_set_drvdata(pdev, NULL);
7772
7773 err_out:
7774         return rc;
7775 }
7776
7777 static char * __devinit
7778 bnx2_bus_string(struct bnx2 *bp, char *str)
7779 {
7780         char *s = str;
7781
7782         if (bp->flags & BNX2_FLAG_PCIE) {
7783                 s += sprintf(s, "PCI Express");
7784         } else {
7785                 s += sprintf(s, "PCI");
7786                 if (bp->flags & BNX2_FLAG_PCIX)
7787                         s += sprintf(s, "-X");
7788                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7789                         s += sprintf(s, " 32-bit");
7790                 else
7791                         s += sprintf(s, " 64-bit");
7792                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7793         }
7794         return str;
7795 }
7796
7797 static void __devinit
7798 bnx2_init_napi(struct bnx2 *bp)
7799 {
7800         int i;
7801
7802         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7803                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7804                 int (*poll)(struct napi_struct *, int);
7805
7806                 if (i == 0)
7807                         poll = bnx2_poll;
7808                 else
7809                         poll = bnx2_poll_msix;
7810
7811                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7812                 bnapi->bp = bp;
7813         }
7814 }
7815
/* net_device_ops table wired into every bnx2 net_device at probe time;
 * VLAN and netpoll entries are compiled in only when the kernel
 * configuration provides those features.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
7834
/* PCI probe callback: allocate the net_device, initialize the board,
 * request firmware, advertise offload features and register with the
 * network stack.  Returns 0 on success or a negative errno; on any
 * failure all resources acquired here (and those left held by a
 * successful bnx2_init_board()) are released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;	/* print the version banner only once */
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];	/* scratch buffer for bnx2_bus_string() */

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	/* Maps the register BAR and fills in *bp (chip id, MAC address,
	 * coalescing defaults, PHY flags, ...).  On failure it has
	 * already cleaned up its own PCI state.
	 */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	/* Loads the MIPS and RV2P firmware images from userspace. */
	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* Permanent MAC address was read from shared memory by
	 * bnx2_init_board().
	 */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo everything acquired above and by bnx2_init_board(). */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
7917
/* PCI remove callback: tear down everything bnx2_init_one() set up. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any pending reset_task before freeing what it uses. */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* Release the firmware images requested in bnx2_init_one(). */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7941
/* PCI suspend callback: quiesce the NIC and drop it into the
 * requested low-power state.  Returns 0 whether or not the interface
 * was running.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, traffic and the periodic timer before
	 * resetting the chip and freeing the rx/tx buffers.
	 */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7965
/* PCI resume callback: restore config space and, if the interface was
 * up when we suspended, bring the chip back to D0, re-initialize it
 * and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7982
7983 /**
7984  * bnx2_io_error_detected - called when PCI error is detected
7985  * @pdev: Pointer to PCI device
7986  * @state: The current pci connection state
7987  *
7988  * This function is called after a PCI bus error affecting
7989  * this device has been detected.
7990  */
7991 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7992                                                pci_channel_state_t state)
7993 {
7994         struct net_device *dev = pci_get_drvdata(pdev);
7995         struct bnx2 *bp = netdev_priv(dev);
7996
7997         rtnl_lock();
7998         netif_device_detach(dev);
7999
8000         if (netif_running(dev)) {
8001                 bnx2_netif_stop(bp);
8002                 del_timer_sync(&bp->timer);
8003                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8004         }
8005
8006         pci_disable_device(pdev);
8007         rtnl_unlock();
8008
8009         /* Request a slot slot reset. */
8010         return PCI_ERS_RESULT_NEED_RESET;
8011 }
8012
8013 /**
8014  * bnx2_io_slot_reset - called after the pci bus has been reset.
8015  * @pdev: Pointer to PCI device
8016  *
8017  * Restart the card from scratch, as if from a cold-boot.
8018  */
8019 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8020 {
8021         struct net_device *dev = pci_get_drvdata(pdev);
8022         struct bnx2 *bp = netdev_priv(dev);
8023
8024         rtnl_lock();
8025         if (pci_enable_device(pdev)) {
8026                 dev_err(&pdev->dev,
8027                         "Cannot re-enable PCI device after reset.\n");
8028                 rtnl_unlock();
8029                 return PCI_ERS_RESULT_DISCONNECT;
8030         }
8031         pci_set_master(pdev);
8032         pci_restore_state(pdev);
8033
8034         if (netif_running(dev)) {
8035                 bnx2_set_power_state(bp, PCI_D0);
8036                 bnx2_init_nic(bp, 1);
8037         }
8038
8039         rtnl_unlock();
8040         return PCI_ERS_RESULT_RECOVERED;
8041 }
8042
8043 /**
8044  * bnx2_io_resume - called when traffic can start flowing again.
8045  * @pdev: Pointer to PCI device
8046  *
8047  * This callback is called when the error recovery driver tells us that
8048  * its OK to resume normal operation.
8049  */
8050 static void bnx2_io_resume(struct pci_dev *pdev)
8051 {
8052         struct net_device *dev = pci_get_drvdata(pdev);
8053         struct bnx2 *bp = netdev_priv(dev);
8054
8055         rtnl_lock();
8056         if (netif_running(dev))
8057                 bnx2_netif_start(bp);
8058
8059         netif_device_attach(dev);
8060         rtnl_unlock();
8061 }
8062
/* PCI error-recovery callbacks, invoked by the PCI core on bus errors. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8068
/* Top-level PCI driver descriptor tying together probe/remove,
 * power management and error-recovery entry points.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8078
/* Module init: register the PCI driver; per-device setup happens in
 * bnx2_init_one() as devices are matched against bnx2_pci_tbl.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8083
/* Module exit: unregister the PCI driver, which removes all devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8088
/* Hook module load/unload to driver registration. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8091
8092
8093