[BNX2]: Modify link up message.
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Driver identity strings used in log prefixes and module metadata. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.5.11"
#define DRV_MODULE_RELDATE      "June 4, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board variants; values index board_info[] below, and appear as the
 * driver_data field of bnx2_pci_tbl entries.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
89
/* Human-readable board names, indexed by board_t above; the order of
 * these entries must match the enum exactly.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
104
/* PCI device match table.  The HP OEM entries (matched by subsystem
 * vendor/device) must precede the generic PCI_ANY_ID Broadcom entries
 * for the same device ID, since matching is first-hit.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
126
/* Supported NVRAM (EEPROM/flash) parts.  NOTE(review): the leading
 * word of each entry appears to encode the NVRAM strap value used to
 * identify the installed part at probe time, and "Expansion entry"
 * rows are placeholders for straps with no known device -- confirm
 * against the NVRAM init code elsewhere in this file.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
213
/* Export bnx2_pci_tbl so hotplug tooling can autoload the module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218         u32 diff;
219
220         smp_mb();
221
222         /* The ring uses 256 indices for 255 entries, one of them
223          * needs to be skipped.
224          */
225         diff = bp->tx_prod - bp->tx_cons;
226         if (unlikely(diff >= TX_DESC_CNT)) {
227                 diff &= 0xffff;
228                 if (diff == TX_DESC_CNT)
229                         diff = MAX_TX_DESC_CNT;
230         }
231         return (bp->tx_ring_size - diff);
232 }
233
234 static u32
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236 {
237         u32 val;
238
239         spin_lock_bh(&bp->indirect_lock);
240         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242         spin_unlock_bh(&bp->indirect_lock);
243         return val;
244 }
245
/* Write a register through the indirect PCI config window.  The lock
 * serializes use of the shared address/data register pair; the address
 * must be latched before the data write.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
254
/* Write a 32-bit value into on-chip context memory at cid_addr+offset.
 * The 5709 uses a data/control register pair and the write must be
 * polled for completion; earlier chips take a plain address/data write.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	/* The context interface registers are shared; serialize access. */
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to 5 x 5us for the chip to clear WRITE_REQ. */
		for (i = 0; i < 5; i++) {
			u32 val;	/* NOTE: shadows the parameter */
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
279
/* Read PHY register 'reg' over MDIO into *val.
 * Returns 0 on success, or -EBUSY (with *val zeroed) if the MDIO
 * transaction never completes.  If hardware auto-polling is enabled it
 * is suspended for the duration of the transaction and restored before
 * returning, even on failure.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend auto-polling so it cannot collide with our
		 * manual MDIO transaction.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the 16-bit data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
336
/* Write 'val' to PHY register 'reg' over MDIO.
 * Returns 0 on success or -EBUSY if the transaction never completes.
 * Mirrors bnx2_read_phy(): auto-polling, if enabled, is suspended
 * around the transaction and restored before returning.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend auto-polling during the manual transaction. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Compose and fire the MDIO write command; 'val' occupies the
	 * low 16-bit data field.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
385
/* Mask chip interrupts.  The trailing read flushes the posted write so
 * the mask is guaranteed to have reached the chip on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Unmask chip interrupts.  The first write acknowledges events up to
 * last_status_idx while keeping interrupts masked; the second write
 * drops the mask.  COAL_NOW then forces an immediate coalescing pass
 * so any already-pending status-block update raises an interrupt
 * right away instead of waiting for the next event.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised first so code checking it sees interrupts as
 * administratively off; it stays elevated until bnx2_netif_start()
 * decrements it.  synchronize_irq() flushes a handler already running
 * on another CPU.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
414
415 static void
416 bnx2_netif_stop(struct bnx2 *bp)
417 {
418         bnx2_disable_int_sync(bp);
419         if (netif_running(bp->dev)) {
420                 netif_poll_disable(bp->dev);
421                 netif_tx_disable(bp->dev);
422                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423         }
424 }
425
426 static void
427 bnx2_netif_start(struct bnx2 *bp)
428 {
429         if (atomic_dec_and_test(&bp->intr_sem)) {
430                 if (netif_running(bp->dev)) {
431                         netif_wake_queue(bp->dev);
432                         netif_poll_enable(bp->dev);
433                         bnx2_enable_int(bp);
434                 }
435         }
436 }
437
/* Release all DMA and host memory set up by bnx2_alloc_mem().
 * Safe on a partially allocated device: each pointer is NULL-checked
 * (or the free routine tolerates NULL) and cleared after freeing.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709-only context memory pages (ctx_pages is 0 otherwise). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* status_blk and stats_blk share one allocation; stats_blk just
	 * points into it, so only status_blk is freed.
	 */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);	/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);	/* vfree(NULL) is a no-op */
	bp->rx_buf_ring = NULL;
}
476
/* Allocate all per-device memory: TX/RX rings (descriptor DMA rings
 * plus host-side shadow buffers), the combined status/statistics
 * block, and on the 5709 the context memory pages.
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The RX shadow array can span multiple rings; use vmalloc as
	 * it may be too large for kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats_blk points into the tail of the status allocation. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of context memory, split into page-sized chunks. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
547
/* Publish the current link state (speed/duplex/autoneg result) to the
 * bootcode through shared memory so firmware and management agents see
 * the same link status as the driver.  Skipped when a remote PHY owns
 * link management.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link changes; read twice so the
			 * second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
606
607 static char *
608 bnx2_xceiver_str(struct bnx2 *bp)
609 {
610         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
612                  "Copper"));
613 }
614
/* Log the link state and update the net-core carrier flag.  The
 * link-up message is built from several printk() continuations:
 * transceiver type, speed, duplex, then the negotiated flow-control
 * directions.  Always ends by reporting the state to firmware.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
651
/* Compute bp->flow_ctrl (FLOW_CTRL_TX/RX bits) for the current link.
 * If pause is not being autonegotiated, the requested setting is used
 * directly (full duplex only).  Otherwise the local and link-partner
 * pause advertisements are resolved per IEEE 802.3 Table 28B-3, with
 * a hardware shortcut on 5708 SerDes where the chip reports the
 * resolved result itself.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Pause not autonegotiated: honor the configured request,
		 * but only on full duplex.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* 5708 SerDes reports the resolved pause result directly. */
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Remap the 1000Base-X pause bits onto the copper-style
		 * PAUSE_CAP/PAUSE_ASYM bits so one resolution path below
		 * handles both media types.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
727
/* Record speed/duplex after link-up on a 5709 SerDes PHY by reading
 * the GP_STATUS register block.  With autoneg disabled the requested
 * settings are taken as-is.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Switch to the GP_STATUS block, read it, then restore the
	 * default COMBO_IEEEB0 block address.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
766
/* Record speed/duplex after link-up on a 5708 SerDes PHY from the
 * 1000X_STAT1 register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
795
/* Record speed/duplex after link-up on a 5706 SerDes PHY.  The link
 * speed is always 1 Gbps; duplex comes from BMCR when forced, or from
 * the intersection of local and partner 1000X advertisements when
 * autonegotiating.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex stands as-is. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Duplex is the highest common 1000X ability. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
832
833 static int
834 bnx2_copper_linkup(struct bnx2 *bp)
835 {
836         u32 bmcr;
837
838         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839         if (bmcr & BMCR_ANENABLE) {
840                 u32 local_adv, remote_adv, common;
841
842                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
844
845                 common = local_adv & (remote_adv >> 2);
846                 if (common & ADVERTISE_1000FULL) {
847                         bp->line_speed = SPEED_1000;
848                         bp->duplex = DUPLEX_FULL;
849                 }
850                 else if (common & ADVERTISE_1000HALF) {
851                         bp->line_speed = SPEED_1000;
852                         bp->duplex = DUPLEX_HALF;
853                 }
854                 else {
855                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
857
858                         common = local_adv & remote_adv;
859                         if (common & ADVERTISE_100FULL) {
860                                 bp->line_speed = SPEED_100;
861                                 bp->duplex = DUPLEX_FULL;
862                         }
863                         else if (common & ADVERTISE_100HALF) {
864                                 bp->line_speed = SPEED_100;
865                                 bp->duplex = DUPLEX_HALF;
866                         }
867                         else if (common & ADVERTISE_10FULL) {
868                                 bp->line_speed = SPEED_10;
869                                 bp->duplex = DUPLEX_FULL;
870                         }
871                         else if (common & ADVERTISE_10HALF) {
872                                 bp->line_speed = SPEED_10;
873                                 bp->duplex = DUPLEX_HALF;
874                         }
875                         else {
876                                 bp->line_speed = 0;
877                                 bp->link_up = 0;
878                         }
879                 }
880         }
881         else {
882                 if (bmcr & BMCR_SPEED100) {
883                         bp->line_speed = SPEED_100;
884                 }
885                 else {
886                         bp->line_speed = SPEED_10;
887                 }
888                 if (bmcr & BMCR_FULLDPLX) {
889                         bp->duplex = DUPLEX_FULL;
890                 }
891                 else {
892                         bp->duplex = DUPLEX_HALF;
893                 }
894         }
895
896         return 0;
897 }
898
/* Program the EMAC to match the resolved link state (bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl), then acknowledge the
 * EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap; 1G half duplex needs a larger value. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII port mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
965
966 static void
967 bnx2_enable_bmsr1(struct bnx2 *bp)
968 {
969         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970             (CHIP_NUM(bp) == CHIP_NUM_5709))
971                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972                                MII_BNX2_BLK_ADDR_GP_STATUS);
973 }
974
975 static void
976 bnx2_disable_bmsr1(struct bnx2 *bp)
977 {
978         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979             (CHIP_NUM(bp) == CHIP_NUM_5709))
980                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
982 }
983
984 static int
985 bnx2_test_and_enable_2g5(struct bnx2 *bp)
986 {
987         u32 up1;
988         int ret = 1;
989
990         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
991                 return 0;
992
993         if (bp->autoneg & AUTONEG_SPEED)
994                 bp->advertising |= ADVERTISED_2500baseX_Full;
995
996         if (CHIP_NUM(bp) == CHIP_NUM_5709)
997                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
998
999         bnx2_read_phy(bp, bp->mii_up1, &up1);
1000         if (!(up1 & BCM5708S_UP1_2G5)) {
1001                 up1 |= BCM5708S_UP1_2G5;
1002                 bnx2_write_phy(bp, bp->mii_up1, up1);
1003                 ret = 0;
1004         }
1005
1006         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1009
1010         return ret;
1011 }
1012
1013 static int
1014 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1015 {
1016         u32 up1;
1017         int ret = 0;
1018
1019         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1020                 return 0;
1021
1022         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1024
1025         bnx2_read_phy(bp, bp->mii_up1, &up1);
1026         if (up1 & BCM5708S_UP1_2G5) {
1027                 up1 &= ~BCM5708S_UP1_2G5;
1028                 bnx2_write_phy(bp, bp->mii_up1, up1);
1029                 ret = 1;
1030         }
1031
1032         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1035
1036         return ret;
1037 }
1038
1039 static void
1040 bnx2_enable_forced_2g5(struct bnx2 *bp)
1041 {
1042         u32 bmcr;
1043
1044         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1045                 return;
1046
1047         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1048                 u32 val;
1049
1050                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1052                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1056
1057                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1060
1061         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1062                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1064         }
1065
1066         if (bp->autoneg & AUTONEG_SPEED) {
1067                 bmcr &= ~BMCR_ANENABLE;
1068                 if (bp->req_duplex == DUPLEX_FULL)
1069                         bmcr |= BMCR_FULLDPLX;
1070         }
1071         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1072 }
1073
1074 static void
1075 bnx2_disable_forced_2g5(struct bnx2 *bp)
1076 {
1077         u32 bmcr;
1078
1079         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1080                 return;
1081
1082         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1083                 u32 val;
1084
1085                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1087                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1098         }
1099
1100         if (bp->autoneg & AUTONEG_SPEED)
1101                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1103 }
1104
/* Poll the PHY, update bp->link_up / speed / duplex / flow control, and
 * reprogram the MAC.  Reports a link change via bnx2_report_link().
 * Skipped entirely in loopback mode or when the link is managed by a
 * remote PHY through firmware.  Always returns 0.
 * Caller must hold bp->phy_lock (implied by the phy accessors used).
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read it twice to get the
	 * current state.  The enable/disable calls select the right
	 * register block on 5709 SerDes PHYs.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: trust the EMAC link status over the
	 * PHY's BMSR.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific routine. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode so autoneg can
		 * start over when the link returns.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1170
1171 static int
1172 bnx2_reset_phy(struct bnx2 *bp)
1173 {
1174         int i;
1175         u32 reg;
1176
1177         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1178
1179 #define PHY_RESET_MAX_WAIT 100
1180         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1181                 udelay(10);
1182
1183                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1184                 if (!(reg & BMCR_RESET)) {
1185                         udelay(20);
1186                         break;
1187                 }
1188         }
1189         if (i == PHY_RESET_MAX_WAIT) {
1190                 return -EBUSY;
1191         }
1192         return 0;
1193 }
1194
1195 static u32
1196 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1197 {
1198         u32 adv = 0;
1199
1200         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1202
1203                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204                         adv = ADVERTISE_1000XPAUSE;
1205                 }
1206                 else {
1207                         adv = ADVERTISE_PAUSE_CAP;
1208                 }
1209         }
1210         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212                         adv = ADVERTISE_1000XPSE_ASYM;
1213                 }
1214                 else {
1215                         adv = ADVERTISE_PAUSE_ASYM;
1216                 }
1217         }
1218         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1221                 }
1222                 else {
1223                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1224                 }
1225         }
1226         return adv;
1227 }
1228
1229 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1230
1231 static int
1232 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233 {
1234         u32 speed_arg = 0, pause_adv;
1235
1236         pause_adv = bnx2_phy_get_pause_adv(bp);
1237
1238         if (bp->autoneg & AUTONEG_SPEED) {
1239                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240                 if (bp->advertising & ADVERTISED_10baseT_Half)
1241                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242                 if (bp->advertising & ADVERTISED_10baseT_Full)
1243                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244                 if (bp->advertising & ADVERTISED_100baseT_Half)
1245                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246                 if (bp->advertising & ADVERTISED_100baseT_Full)
1247                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252         } else {
1253                 if (bp->req_line_speed == SPEED_2500)
1254                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255                 else if (bp->req_line_speed == SPEED_1000)
1256                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257                 else if (bp->req_line_speed == SPEED_100) {
1258                         if (bp->req_duplex == DUPLEX_FULL)
1259                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260                         else
1261                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262                 } else if (bp->req_line_speed == SPEED_10) {
1263                         if (bp->req_duplex == DUPLEX_FULL)
1264                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265                         else
1266                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1267                 }
1268         }
1269
1270         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1273                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274
1275         if (port == PORT_TP)
1276                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278
1279         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280
1281         spin_unlock_bh(&bp->phy_lock);
1282         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283         spin_lock_bh(&bp->phy_lock);
1284
1285         return 0;
1286 }
1287
/* Configure the SerDes PHY according to bp->autoneg / bp->req_*.
 * Delegates to the firmware for remote-PHY setups.  In forced mode,
 * programs BMCR/ADV directly, forcing a visible link-down first when
 * the settings change; in autoneg mode, updates the advertisement and
 * restarts autonegotiation.  Always returns 0.
 * Caller holds bp->phy_lock; it is dropped briefly around msleep().
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is a 5709-specific BMCR bit
				 * cleared when dropping back to 1G.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing with autoneg
				 * on so the partner sees the link drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-sync MAC settings. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the phy lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1402
/* All fibre speeds this PHY can advertise; 2.5G only when the PHY is
 * 2.5G-capable.  Evaluates `bp`, so only usable where `bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds in ethtool ADVERTISED_* form. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* All 10/100 abilities in MII ADVERTISE_* (register) form. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* All 1000BASE-T abilities in MII CTRL1000 (register) form. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1417
/* Initialize bp->autoneg / advertising / req_line_speed / req_duplex
 * from the default link settings the firmware stored in shared memory
 * for the current phy_port (copper or SerDes).
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: decode the advertised speed bits. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: later checks win, so the highest speed
		 * present in the word takes effect.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1464
/* Set the driver's default link parameters: full autoneg with all
 * supported speeds advertised, unless the hardware config in shared
 * memory forces 1G full duplex on a SerDes port.  Remote-PHY setups
 * take their defaults from firmware instead.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Honor a forced-1G default from the NVRAM port config. */
		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1488
/* Handle a link event reported by the firmware-managed remote PHY:
 * decode the link status word from shared memory into bp->link_up,
 * line_speed, duplex, flow_ctrl and phy_port, then reprogram the MAC
 * and report any link change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case overrides the duplex default and then
		 * deliberately falls through to its FULL case to set the
		 * common line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control not fully autonegotiated: use the
			 * requested setting (full duplex only).
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Take the negotiated result from the firmware. */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware may have switched between copper and
		 * SerDes; reload defaults if the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1562
1563 static int
1564 bnx2_set_remote_link(struct bnx2 *bp)
1565 {
1566         u32 evt_code;
1567
1568         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1569         switch (evt_code) {
1570                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1571                         bnx2_remote_phy_event(bp);
1572                         break;
1573                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1574                 default:
1575                         break;
1576         }
1577         return 0;
1578 }
1579
/* Program a copper PHY according to bp->autoneg, bp->advertising and
 * the requested forced speed/duplex.  Called with bp->phy_lock held;
 * the lock is dropped briefly around the forced-link-down delay.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement, keeping only the speed
		 * and pause bits for comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* (Re)start autoneg only if the advertisement changed or
		 * autoneg is not currently enabled in BMCR.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path below. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		/* BMCR already matches; just refresh flow control and MAC. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1676
1677 static int
1678 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1679 {
1680         if (bp->loopback == MAC_LOOPBACK)
1681                 return 0;
1682
1683         if (bp->phy_flags & PHY_SERDES_FLAG) {
1684                 return (bnx2_setup_serdes_phy(bp, port));
1685         }
1686         else {
1687                 return (bnx2_setup_copper_phy(bp));
1688         }
1689 }
1690
/* Initialize the 5709 SerDes PHY.  The 5709S PHY registers live in
 * banked blocks selected via MII_BNX2_BLK_ADDR; the standard MII
 * registers are offset by 0x10 in the combo IEEE block.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers are at base + 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode, disable auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the PHY pointing at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1739
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect,
 * optional 2.5G advertisement, plus chip-revision and backplane
 * TX amplitude tuning from shared-memory configuration.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE-compliant signalling. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G if the board is capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the TXCTL3 value from shared memory on backplane boards. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1797
/* Initialize the 5706 SerDes PHY, adjusting packet-length related
 * PHY registers for jumbo vs. standard MTU.  The 0x18/0x1c writes
 * use raw register numbers inherited from the vendor code; their
 * exact bit meanings are not documented here.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1834
/* Initialize a copper PHY: apply CRC and early-DAC workarounds when
 * flagged, program extended packet length for jumbo MTU, and enable
 * ethernet@wirespeed.  The shadow-register write sequences (0x18,
 * 0x17, 0x15, 0x1c, 0x10) are vendor magic values.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround: vendor-specified shadow register sequence. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via the DSP expansion register, bit 8. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1885
1886
/* Top-level PHY initialization: set default MII register offsets,
 * read the PHY ID, run the chip-specific init routine, then program
 * the PHY via bnx2_setup_phy().  When the PHY is managed remotely
 * (REMOTE_PHY_CAP_FLAG) the local init is skipped entirely.
 * Returns 0 or the first error from an init/setup step.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Interrupt on link-ready rather than link-state changes. */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default (IEEE) MII register map; chip-specific init routines
	 * may override these (e.g. bnx2_init_5709s_phy).
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1930
1931 static int
1932 bnx2_set_mac_loopback(struct bnx2 *bp)
1933 {
1934         u32 mac_mode;
1935
1936         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1937         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1938         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1939         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1940         bp->link_up = 1;
1941         return 0;
1942 }
1943
1944 static int bnx2_test_link(struct bnx2 *);
1945
1946 static int
1947 bnx2_set_phy_loopback(struct bnx2 *bp)
1948 {
1949         u32 mac_mode;
1950         int rc, i;
1951
1952         spin_lock_bh(&bp->phy_lock);
1953         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1954                             BMCR_SPEED1000);
1955         spin_unlock_bh(&bp->phy_lock);
1956         if (rc)
1957                 return rc;
1958
1959         for (i = 0; i < 10; i++) {
1960                 if (bnx2_test_link(bp) == 0)
1961                         break;
1962                 msleep(100);
1963         }
1964
1965         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1966         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1967                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1968                       BNX2_EMAC_MODE_25G_MODE);
1969
1970         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1971         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1972         bp->link_up = 1;
1973         return 0;
1974 }
1975
/* Post a message to the bootcode through the shared-memory driver
 * mailbox and wait for the firmware to acknowledge it.  A sequence
 * number (bp->fw_wr_seq) is folded into the message so acks can be
 * matched.  Returns 0 on success or for WAIT0 messages (which do
 * not wait for full completion), -EBUSY on ack timeout, -EIO if the
 * firmware reports a non-OK status.  @silent suppresses the timeout
 * message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes our sequence number in the ack field. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget; report success regardless. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2018
/* Initialize the 5709 context memory: trigger the hardware memory
 * init, then load the host page table with the DMA addresses of the
 * pre-allocated context pages (bp->ctx_blk_mapping).  Each register
 * write request is polled for completion.  Returns 0 on success or
 * -EBUSY if the hardware does not complete in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size (log2 - 8) in bits 16+. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page DMA address, plus valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		/* High 32 bits of the page DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2061
/* Zero out the on-chip context memory for all 96 connection IDs on
 * pre-5709 chips.  On 5706 A0 some context IDs map to different
 * physical CIDs (hardware erratum workaround), hence the vcid ->
 * new_vcid remapping below.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 erratum: CIDs with bit 3 set map into the
			 * 0x60+ range physically.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans several physical pages; zero each. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2107
/* Work around bad on-chip RX buffer memory: allocate every mbuf from
 * the chip's pool, remember the good ones (bit 9 clear), then free
 * only the good ones back — leaving the bad blocks permanently
 * allocated so the hardware never uses them.  Returns 0 on success
 * or -ENOMEM.
 *
 * NOTE(review): good_mbuf has a fixed 512-entry capacity with no
 * bounds check on good_mbuf_cnt; presumably the chip's free count
 * can never exceed 512 — confirm against the hardware spec.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the mbuf handle in the format the free
		 * register expects.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2158
2159 static void
2160 bnx2_set_mac_addr(struct bnx2 *bp)
2161 {
2162         u32 val;
2163         u8 *mac_addr = bp->dev->dev_addr;
2164
2165         val = (mac_addr[0] << 8) | mac_addr[1];
2166
2167         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2168
2169         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2170                 (mac_addr[4] << 8) | mac_addr[5];
2171
2172         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2173 }
2174
/* Allocate and DMA-map a fresh skb for RX ring slot @index, writing
 * its DMA address into the corresponding hardware buffer descriptor
 * and advancing rx_prod_bseq.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): the pci_map_single() result is not checked for a
 * mapping error — presumably acceptable on the platforms this
 * driver targets; confirm before porting.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hardware BD takes the DMA address split into hi/lo words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2205
2206 static int
2207 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2208 {
2209         struct status_block *sblk = bp->status_blk;
2210         u32 new_link_state, old_link_state;
2211         int is_set = 1;
2212
2213         new_link_state = sblk->status_attn_bits & event;
2214         old_link_state = sblk->status_attn_bits_ack & event;
2215         if (new_link_state != old_link_state) {
2216                 if (new_link_state)
2217                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2218                 else
2219                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2220         } else
2221                 is_set = 0;
2222
2223         return is_set;
2224 }
2225
2226 static void
2227 bnx2_phy_int(struct bnx2 *bp)
2228 {
2229         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2230                 spin_lock(&bp->phy_lock);
2231                 bnx2_set_link(bp);
2232                 spin_unlock(&bp->phy_lock);
2233         }
2234         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2235                 bnx2_set_remote_link(bp);
2236
2237 }
2238
/* Reclaim completed TX buffer descriptors: unmap each finished skb
 * (head plus fragment pages), free it, and advance the software
 * consumer to the hardware consumer from the status block.  Wakes
 * the TX queue if it was stopped and enough BDs are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Skip over the last entry of a ring page (presumably the
	 * next-page link BD — same adjustment as the RX path).
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound:
			 * stop if the packet's last BD is not done yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear head of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each paged fragment, one BD per fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer to pick up new completions
		 * posted while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to avoid racing with a concurrent
	 * bnx2_start_xmit() that is stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2326
/* Recycle an RX skb from ring slot @cons to producer slot @prod
 * without unmapping it: hand the DMA buffer back to the device,
 * copy the software mapping and the hardware BD address over, and
 * advance rx_prod_bseq.  Used when a packet is dropped or was
 * copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (already synced-for-CPU) header region back to the
	 * device before the BD is reposted.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the DMA address into the producer's hardware BD. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2356
/* Process up to @budget received packets: validate the hardware
 * frame header, either copy small packets into a fresh skb or
 * replenish the ring slot, set checksum/VLAN metadata and pass the
 * skb up the stack.  Updates the chip's RX BD index and byte
 * sequence mailboxes when done.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the last entry of a ring page (presumably the
	 * next-page link BD).
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header region; the full buffer is only
		 * needed if we keep the skb (unmapped below).
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with status and length;
		 * the length includes the 4-byte FCS, which we strip.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		/* Drop (recycle) frames with any receive error. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer stays in the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Ring slot refilled; keep this skb and unmap it. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Allocation failed or frame errored: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum only for TCP/UDP frames with
		 * no checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2504
2505 /* MSI ISR - The only difference between this and the INTx ISR
2506  * is that the MSI interrupt is always serviced.
2507  */
2508 static irqreturn_t
2509 bnx2_msi(int irq, void *dev_instance)
2510 {
2511         struct net_device *dev = dev_instance;
2512         struct bnx2 *bp = netdev_priv(dev);
2513
2514         prefetch(bp->status_blk);
2515         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2516                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2517                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2518
2519         /* Return here if interrupt is disabled. */
2520         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2521                 return IRQ_HANDLED;
2522
2523         netif_rx_schedule(dev);
2524
2525         return IRQ_HANDLED;
2526 }
2527
2528 static irqreturn_t
2529 bnx2_msi_1shot(int irq, void *dev_instance)
2530 {
2531         struct net_device *dev = dev_instance;
2532         struct bnx2 *bp = netdev_priv(dev);
2533
2534         prefetch(bp->status_blk);
2535
2536         /* Return here if interrupt is disabled. */
2537         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2538                 return IRQ_HANDLED;
2539
2540         netif_rx_schedule(dev);
2541
2542         return IRQ_HANDLED;
2543 }
2544
2545 static irqreturn_t
2546 bnx2_interrupt(int irq, void *dev_instance)
2547 {
2548         struct net_device *dev = dev_instance;
2549         struct bnx2 *bp = netdev_priv(dev);
2550
2551         /* When using INTx, it is possible for the interrupt to arrive
2552          * at the CPU before the status block posted prior to the
2553          * interrupt. Reading a register will flush the status block.
2554          * When using MSI, the MSI message will always complete after
2555          * the status block write.
2556          */
2557         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2558             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2559              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2560                 return IRQ_NONE;
2561
2562         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2563                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2564                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2565
2566         /* Return here if interrupt is shared and is disabled. */
2567         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2568                 return IRQ_HANDLED;
2569
2570         netif_rx_schedule(dev);
2571
2572         return IRQ_HANDLED;
2573 }
2574
/* Attention events serviced by the poll loop via bnx2_phy_int():
 * link-state changes and timer aborts. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
2577
2578 static inline int
2579 bnx2_has_work(struct bnx2 *bp)
2580 {
2581         struct status_block *sblk = bp->status_blk;
2582
2583         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2584             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2585                 return 1;
2586
2587         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2588             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2589                 return 1;
2590
2591         return 0;
2592 }
2593
/* NAPI poll handler: service pending link attentions, reap TX
 * completions, and receive up to *budget packets.  Returns 0 and
 * re-enables interrupts when all work is done, 1 when more polling
 * is required.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Attention bits that differ from their ack bits indicate an
	 * unserviced link-state or timer-abort event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();	/* read status index before re-checking for new work */

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* For INTx: ack with the mask bit first, then without it,
		 * to re-enable the interrupt line. */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2654
/* Program the receive filter (promiscuous / all-multicast / multicast
 * hash) from dev->flags and the device's multicast list.
 *
 * Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN-tag bits cleared; they are re-added below as needed. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only when no vlan group is
	 * registered and ASF firmware is not enabled. */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit of the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC:
			 * bits 7:5 select the register, bits 4:0 the bit. */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC mode register when something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user0 rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2729
/* 32 KB scratch buffer for decompressed firmware sections. */
#define FW_BUF_SIZE	0x8000
2731
2732 static int
2733 bnx2_gunzip_init(struct bnx2 *bp)
2734 {
2735         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2736                 goto gunzip_nomem1;
2737
2738         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2739                 goto gunzip_nomem2;
2740
2741         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2742         if (bp->strm->workspace == NULL)
2743                 goto gunzip_nomem3;
2744
2745         return 0;
2746
2747 gunzip_nomem3:
2748         kfree(bp->strm);
2749         bp->strm = NULL;
2750
2751 gunzip_nomem2:
2752         vfree(bp->gunzip_buf);
2753         bp->gunzip_buf = NULL;
2754
2755 gunzip_nomem1:
2756         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2757                             "uncompression.\n", bp->dev->name);
2758         return -ENOMEM;
2759 }
2760
2761 static void
2762 bnx2_gunzip_end(struct bnx2 *bp)
2763 {
2764         kfree(bp->strm->workspace);
2765
2766         kfree(bp->strm);
2767         bp->strm = NULL;
2768
2769         if (bp->gunzip_buf) {
2770                 vfree(bp->gunzip_buf);
2771                 bp->gunzip_buf = NULL;
2772         }
2773 }
2774
2775 static int
2776 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2777 {
2778         int n, rc;
2779
2780         /* check gzip header */
2781         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2782                 return -EINVAL;
2783
2784         n = 10;
2785
2786 #define FNAME   0x8
2787         if (zbuf[3] & FNAME)
2788                 while ((zbuf[n++] != 0) && (n < len));
2789
2790         bp->strm->next_in = zbuf + n;
2791         bp->strm->avail_in = len - n;
2792         bp->strm->next_out = bp->gunzip_buf;
2793         bp->strm->avail_out = FW_BUF_SIZE;
2794
2795         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2796         if (rc != Z_OK)
2797                 return rc;
2798
2799         rc = zlib_inflate(bp->strm, Z_FINISH);
2800
2801         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2802         *outbuf = bp->gunzip_buf;
2803
2804         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2805                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2806                        bp->dev->name, bp->strm->msg);
2807
2808         zlib_inflateEnd(bp->strm);
2809
2810         if (rc == Z_STREAM_END)
2811                 return 0;
2812
2813         return rc;
2814 }
2815
/* Download RV2P microcode into the selected processor (RV2P_PROC1 or
 * RV2P_PROC2), one 8-byte instruction at a time, then hold the
 * processor in reset.  Un-stalling is done later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction pair to slot i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2848
2849 static int
2850 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2851 {
2852         u32 offset;
2853         u32 val;
2854         int rc;
2855
2856         /* Halt the CPU. */
2857         val = REG_RD_IND(bp, cpu_reg->mode);
2858         val |= cpu_reg->mode_value_halt;
2859         REG_WR_IND(bp, cpu_reg->mode, val);
2860         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2861
2862         /* Load the Text area. */
2863         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2864         if (fw->gz_text) {
2865                 u32 text_len;
2866                 void *text;
2867
2868                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2869                                  &text_len);
2870                 if (rc)
2871                         return rc;
2872
2873                 fw->text = text;
2874         }
2875         if (fw->gz_text) {
2876                 int j;
2877
2878                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2879                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2880                 }
2881         }
2882
2883         /* Load the Data area. */
2884         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2885         if (fw->data) {
2886                 int j;
2887
2888                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2889                         REG_WR_IND(bp, offset, fw->data[j]);
2890                 }
2891         }
2892
2893         /* Load the SBSS area. */
2894         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2895         if (fw->sbss) {
2896                 int j;
2897
2898                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2899                         REG_WR_IND(bp, offset, fw->sbss[j]);
2900                 }
2901         }
2902
2903         /* Load the BSS area. */
2904         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2905         if (fw->bss) {
2906                 int j;
2907
2908                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2909                         REG_WR_IND(bp, offset, fw->bss[j]);
2910                 }
2911         }
2912
2913         /* Load the Read-Only area. */
2914         offset = cpu_reg->spad_base +
2915                 (fw->rodata_addr - cpu_reg->mips_view_base);
2916         if (fw->rodata) {
2917                 int j;
2918
2919                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2920                         REG_WR_IND(bp, offset, fw->rodata[j]);
2921                 }
2922         }
2923
2924         /* Clear the pre-fetch instruction. */
2925         REG_WR_IND(bp, cpu_reg->inst, 0);
2926         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2927
2928         /* Start the CPU. */
2929         val = REG_RD_IND(bp, cpu_reg->mode);
2930         val &= ~cpu_reg->mode_value_halt;
2931         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2932         REG_WR_IND(bp, cpu_reg->mode, val);
2933
2934         return 0;
2935 }
2936
/* Load firmware into every on-chip processor: both RV2P engines plus
 * the RXP, TXP, TPAT and COM CPUs (and CP, on 5709 only).  The
 * firmware image set is selected by CHIP_NUM().  Returns 0 or the
 * first error from the gunzip/load helpers.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;	/* same CPU-side base for all processors */

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (only present on the 5709). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3081
/* Transition the chip between PCI power states.
 *
 * PCI_D0: wake the device (with the required delay when leaving D3hot)
 * and clear the magic-packet/ACPI receive configuration.
 * PCI_D3hot: optionally arm Wake-on-LAN (force 10/100 copper autoneg,
 * accept broadcast/multicast, keep EMAC and RPM running), notify the
 * firmware, then write the power state.  Any other state is -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WOL
			 * link, then restore the user settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Disable, program, then enable the sort rule. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 is only placed in D3hot when WOL is enabled;
		 * all other chips always are. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;	/* D3hot */
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3208
3209 static int
3210 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3211 {
3212         u32 val;
3213         int j;
3214
3215         /* Request access to the flash interface. */
3216         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3217         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3218                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3219                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3220                         break;
3221
3222                 udelay(5);
3223         }
3224
3225         if (j >= NVRAM_TIMEOUT_COUNT)
3226                 return -EBUSY;
3227
3228         return 0;
3229 }
3230
3231 static int
3232 bnx2_release_nvram_lock(struct bnx2 *bp)
3233 {
3234         int j;
3235         u32 val;
3236
3237         /* Relinquish nvram interface. */
3238         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3239
3240         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3241                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3242                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3243                         break;
3244
3245                 udelay(5);
3246         }
3247
3248         if (j >= NVRAM_TIMEOUT_COUNT)
3249                 return -EBUSY;
3250
3251         return 0;
3252 }
3253
3254
3255 static int
3256 bnx2_enable_nvram_write(struct bnx2 *bp)
3257 {
3258         u32 val;
3259
3260         val = REG_RD(bp, BNX2_MISC_CFG);
3261         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3262
3263         if (!bp->flash_info->buffered) {
3264                 int j;
3265
3266                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3267                 REG_WR(bp, BNX2_NVM_COMMAND,
3268                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3269
3270                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3271                         udelay(5);
3272
3273                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3274                         if (val & BNX2_NVM_COMMAND_DONE)
3275                                 break;
3276                 }
3277
3278                 if (j >= NVRAM_TIMEOUT_COUNT)
3279                         return -EBUSY;
3280         }
3281         return 0;
3282 }
3283
3284 static void
3285 bnx2_disable_nvram_write(struct bnx2 *bp)
3286 {
3287         u32 val;
3288
3289         val = REG_RD(bp, BNX2_MISC_CFG);
3290         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3291 }
3292
3293
3294 static void
3295 bnx2_enable_nvram_access(struct bnx2 *bp)
3296 {
3297         u32 val;
3298
3299         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3300         /* Enable both bits, even on read. */
3301         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3302                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3303 }
3304
3305 static void
3306 bnx2_disable_nvram_access(struct bnx2 *bp)
3307 {
3308         u32 val;
3309
3310         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3311         /* Disable both bits, even after read. */
3312         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3313                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3314                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3315 }
3316
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts, which need no explicit erase.  Returns 0 on success, -EBUSY
 * on command timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3356
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * The word is read from the NVM_READ register and converted with
 * be32_to_cpu() before being copied out.  @cmd_flags carries the
 * BNX2_NVM_COMMAND_FIRST/LAST framing bits for multi-word sequences.
 * Caller must already hold the NVRAM lock and have access enabled.
 * Returns 0 on success, -EBUSY if the controller never reports DONE.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as (page number << page_bits) + byte-within-page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, 5 usec per iteration. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			/* memcpy instead of a cast: @ret_val may be
			 * unaligned. */
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3402
3403
/* Write one 32-bit word (4 bytes at @val) to NVRAM at @offset.  The
 * data is converted with cpu_to_be32() before being written to the
 * NVM_WRITE register.  @cmd_flags carries the FIRST/LAST framing bits.
 * Caller must hold the NVRAM lock with access and write both enabled.
 * Returns 0 on success, -EBUSY if the controller never reports DONE.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as (page number << page_bits) + byte-within-page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy instead of a cast: @val may be unaligned. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, 5 usec per iteration. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3447
3448 static int
3449 bnx2_init_nvram(struct bnx2 *bp)
3450 {
3451         u32 val;
3452         int j, entry_count, rc;
3453         struct flash_spec *flash;
3454
3455         /* Determine the selected interface. */
3456         val = REG_RD(bp, BNX2_NVM_CFG1);
3457
3458         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3459
3460         rc = 0;
3461         if (val & 0x40000000) {
3462
3463                 /* Flash interface has been reconfigured */
3464                 for (j = 0, flash = &flash_table[0]; j < entry_count;
3465                      j++, flash++) {
3466                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
3467                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3468                                 bp->flash_info = flash;
3469                                 break;
3470                         }
3471                 }
3472         }
3473         else {
3474                 u32 mask;
3475                 /* Not yet been reconfigured */
3476
3477                 if (val & (1 << 23))
3478                         mask = FLASH_BACKUP_STRAP_MASK;
3479                 else
3480                         mask = FLASH_STRAP_MASK;
3481
3482                 for (j = 0, flash = &flash_table[0]; j < entry_count;
3483                         j++, flash++) {
3484
3485                         if ((val & mask) == (flash->strapping & mask)) {
3486                                 bp->flash_info = flash;
3487
3488                                 /* Request access to the flash interface. */
3489                                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3490                                         return rc;
3491
3492                                 /* Enable access to flash interface */
3493                                 bnx2_enable_nvram_access(bp);
3494
3495                                 /* Reconfigure the flash interface */
3496                                 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3497                                 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3498                                 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3499                                 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3500
3501                                 /* Disable access to flash interface */
3502                                 bnx2_disable_nvram_access(bp);
3503                                 bnx2_release_nvram_lock(bp);
3504
3505                                 break;
3506                         }
3507                 }
3508         } /* if (val & 0x40000000) */
3509
3510         if (j == entry_count) {
3511                 bp->flash_info = NULL;
3512                 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3513                 return -ENODEV;
3514         }
3515
3516         val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3517         val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3518         if (val)
3519                 bp->flash_size = val;
3520         else
3521                 bp->flash_size = bp->flash_info->total_size;
3522
3523         return rc;
3524 }
3525
3526 static int
3527 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3528                 int buf_size)
3529 {
3530         int rc = 0;
3531         u32 cmd_flags, offset32, len32, extra;
3532
3533         if (buf_size == 0)
3534                 return 0;
3535
3536         /* Request access to the flash interface. */
3537         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3538                 return rc;
3539
3540         /* Enable access to flash interface */
3541         bnx2_enable_nvram_access(bp);
3542
3543         len32 = buf_size;
3544         offset32 = offset;
3545         extra = 0;
3546
3547         cmd_flags = 0;
3548
3549         if (offset32 & 3) {
3550                 u8 buf[4];
3551                 u32 pre_len;
3552
3553                 offset32 &= ~3;
3554                 pre_len = 4 - (offset & 3);
3555
3556                 if (pre_len >= len32) {
3557                         pre_len = len32;
3558                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3559                                     BNX2_NVM_COMMAND_LAST;
3560                 }
3561                 else {
3562                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3563                 }
3564
3565                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3566
3567                 if (rc)
3568                         return rc;
3569
3570                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3571
3572                 offset32 += 4;
3573                 ret_buf += pre_len;
3574                 len32 -= pre_len;
3575         }
3576         if (len32 & 3) {
3577                 extra = 4 - (len32 & 3);
3578                 len32 = (len32 + 4) & ~3;
3579         }
3580
3581         if (len32 == 4) {
3582                 u8 buf[4];
3583
3584                 if (cmd_flags)
3585                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3586                 else
3587                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3588                                     BNX2_NVM_COMMAND_LAST;
3589
3590                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3591
3592                 memcpy(ret_buf, buf, 4 - extra);
3593         }
3594         else if (len32 > 0) {
3595                 u8 buf[4];
3596
3597                 /* Read the first word. */
3598                 if (cmd_flags)
3599                         cmd_flags = 0;
3600                 else
3601                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3602
3603                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3604
3605                 /* Advance to the next dword. */
3606                 offset32 += 4;
3607                 ret_buf += 4;
3608                 len32 -= 4;
3609
3610                 while (len32 > 4 && rc == 0) {
3611                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3612
3613                         /* Advance to the next dword. */
3614                         offset32 += 4;
3615                         ret_buf += 4;
3616                         len32 -= 4;
3617                 }
3618
3619                 if (rc)
3620                         return rc;
3621
3622                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3623                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3624
3625                 memcpy(ret_buf, buf, 4 - extra);
3626         }
3627
3628         /* Disable access to flash interface */
3629         bnx2_disable_nvram_access(bp);
3630
3631         bnx2_release_nvram_lock(bp);
3632
3633         return rc;
3634 }
3635
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are handled by first reading the bordering
 * dwords and merging the new data into a kmalloc'd aligned copy.  For
 * non-buffered flash parts each affected page is read in full, erased,
 * and rewritten (read-modify-write at page granularity); buffered
 * parts are written directly.  The NVRAM lock is acquired and released
 * around each page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, ...).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: round down and pre-read the first dword. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: round up and pre-read the last dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge pre-read border bytes and the caller's data into one
	 * dword-aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer to hold one page
	 * for the read-modify-write cycle. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST on the final dword of the page, or of the
			 * data when writing buffered flash directly. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both frees are unconditional. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3815
/* Probe the firmware capability mailbox and, if the firmware can
 * manage a remote PHY, acknowledge that capability and record the PHY
 * port type.  Sets or clears REMOTE_PHY_CAP_FLAG in bp->phy_flags.
 * Only meaningful for SerDes devices.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	/* Remote PHY support only applies to SerDes devices. */
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Validate the firmware capability signature before trusting
	 * the capability bits. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		/* Acknowledge the capability to firmware, but only while
		 * the interface is actually up. */
		if (netif_running(bp->dev)) {
			val = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   val);
		}
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		/* Record whether the link is fibre (SerDes) or copper. */
		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
	}
}
3845
3846 static int
3847 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3848 {
3849         u32 val;
3850         int i, rc = 0;
3851
3852         /* Wait for the current PCI transaction to complete before
3853          * issuing a reset. */
3854         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3855                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3856                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3857                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3858                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3859         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3860         udelay(5);
3861
3862         /* Wait for the firmware to tell us it is ok to issue a reset. */
3863         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3864
3865         /* Deposit a driver reset signature so the firmware knows that
3866          * this is a soft reset. */
3867         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3868                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3869
3870         /* Do a dummy read to force the chip to complete all current transaction
3871          * before we issue a reset. */
3872         val = REG_RD(bp, BNX2_MISC_ID);
3873
3874         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3875                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3876                 REG_RD(bp, BNX2_MISC_COMMAND);
3877                 udelay(5);
3878
3879                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3880                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3881
3882                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3883
3884         } else {
3885                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3886                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3887                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3888
3889                 /* Chip reset. */
3890                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3891
3892                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3893                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3894                         current->state = TASK_UNINTERRUPTIBLE;
3895                         schedule_timeout(HZ / 50);
3896                 }
3897
3898                 /* Reset takes approximate 30 usec */
3899                 for (i = 0; i < 10; i++) {
3900                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3901                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3902                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3903                                 break;
3904                         udelay(10);
3905                 }
3906
3907                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3908                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3909                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3910                         return -EBUSY;
3911                 }
3912         }
3913
3914         /* Make sure byte swapping is properly configured. */
3915         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3916         if (val != 0x01020304) {
3917                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3918                 return -ENODEV;
3919         }
3920
3921         /* Wait for the firmware to finish its initialization. */
3922         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3923         if (rc)
3924                 return rc;
3925
3926         spin_lock_bh(&bp->phy_lock);
3927         bnx2_init_remote_phy(bp);
3928         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3929                 bnx2_set_default_remote_link(bp);
3930         spin_unlock_bh(&bp->phy_lock);
3931
3932         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3933                 /* Adjust the voltage regular to two steps lower.  The default
3934                  * of this register is 0x0000000e. */
3935                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3936
3937                 /* Remove bad rbuf memory from the free pool. */
3938                 rc = bnx2_alloc_bad_rbuf(bp);
3939         }
3940
3941         return rc;
3942 }
3943
/* Bring the chip from post-reset state to fully initialized: DMA
 * configuration, internal CPUs/firmware, NVRAM detection, MAC address,
 * MTU, host coalescing parameters, status/statistics block DMA
 * addresses, and the receive filter.  Finishes with a WAIT2/RESET
 * firmware handshake and enables all chip blocks.
 * Returns 0 on success or a negative errno from a sub-step.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* One-shot bit for 133 MHz PCI-X operation. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* 5706 (except A0) on conventional PCI uses ping-pong DMA. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to one DMA channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, turn off relaxed ordering in the PCI-X command. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 errata workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: packet-count trip points and timer ticks,
	 * interrupt values in the high 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell firmware initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining chip blocks. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host coalescing command for later fast access. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4125
4126 static void
4127 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4128 {
4129         u32 val, offset0, offset1, offset2, offset3;
4130
4131         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4132                 offset0 = BNX2_L2CTX_TYPE_XI;
4133                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4134                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4135                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4136         } else {
4137                 offset0 = BNX2_L2CTX_TYPE;
4138                 offset1 = BNX2_L2CTX_CMD_TYPE;
4139                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4140                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4141         }
4142         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4143         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4144
4145         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4146         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4147
4148         val = (u64) bp->tx_desc_mapping >> 32;
4149         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4150
4151         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4152         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4153 }
4154
4155 static void
4156 bnx2_init_tx_ring(struct bnx2 *bp)
4157 {
4158         struct tx_bd *txbd;
4159         u32 cid;
4160
4161         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4162
4163         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4164
4165         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4166         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4167
4168         bp->tx_prod = 0;
4169         bp->tx_cons = 0;
4170         bp->hw_tx_cons = 0;
4171         bp->tx_prod_bseq = 0;
4172
4173         cid = TX_CID;
4174         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4175         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4176
4177         bnx2_init_tx_context(bp, cid);
4178 }
4179
/* Initialize the RX BD ring pages, chain them together, program the
 * RX context in the chip, and pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page chains to the next page; the
		 * last page chains back to the first, closing the ring.
		 */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the BD chain base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with skbs; stop early if allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the chip how many BDs are available. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4239
4240 static void
4241 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4242 {
4243         u32 num_rings, max;
4244
4245         bp->rx_ring_size = size;
4246         num_rings = 1;
4247         while (size > MAX_RX_DESC_CNT) {
4248                 size -= MAX_RX_DESC_CNT;
4249                 num_rings++;
4250         }
4251         /* round to next power of 2 */
4252         max = MAX_RX_RINGS;
4253         while ((max & num_rings) == 0)
4254                 max >>= 1;
4255
4256         if (num_rings != max)
4257                 max <<= 1;
4258
4259         bp->rx_max_ring = max;
4260         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4261 }
4262
/* Unmap and free every skb still owned by the TX ring.  Used during
 * teardown/reset, when no TX completion will reclaim these buffers.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* The first BD maps the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Subsequent BDs map the skb's page fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past this skb's head BD and all of its frag BDs. */
		i += j + 1;
	}

}
4299
4300 static void
4301 bnx2_free_rx_skbs(struct bnx2 *bp)
4302 {
4303         int i;
4304
4305         if (bp->rx_buf_ring == NULL)
4306                 return;
4307
4308         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4309                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4310                 struct sk_buff *skb = rx_buf->skb;
4311
4312                 if (skb == NULL)
4313                         continue;
4314
4315                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4316                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4317
4318                 rx_buf->skb = NULL;
4319
4320                 dev_kfree_skb(skb);
4321         }
4322 }
4323
/* Release every skb still held by the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4330
4331 static int
4332 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4333 {
4334         int rc;
4335
4336         rc = bnx2_reset_chip(bp, reset_code);
4337         bnx2_free_skbs(bp);
4338         if (rc)
4339                 return rc;
4340
4341         if ((rc = bnx2_init_chip(bp)) != 0)
4342                 return rc;
4343
4344         bnx2_init_tx_ring(bp);
4345         bnx2_init_rx_ring(bp);
4346         return 0;
4347 }
4348
4349 static int
4350 bnx2_init_nic(struct bnx2 *bp)
4351 {
4352         int rc;
4353
4354         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4355                 return rc;
4356
4357         spin_lock_bh(&bp->phy_lock);
4358         bnx2_init_phy(bp);
4359         bnx2_set_link(bp);
4360         spin_unlock_bh(&bp->phy_lock);
4361         return 0;
4362 }
4363
/* Register self-test: for each entry in reg_tbl, verify that the
 * read/write bits (rw_mask) can be both cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on the 5709.
 *
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear all writable bits and leave the
		 * read-only bits untouched.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all ones must set every writable bit and
		 * still leave the read-only bits untouched.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before bailing out. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4534
4535 static int
4536 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4537 {
4538         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4539                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4540         int i;
4541
4542         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4543                 u32 offset;
4544
4545                 for (offset = 0; offset < size; offset += 4) {
4546
4547                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4548
4549                         if (REG_RD_IND(bp, start + offset) !=
4550                                 test_pattern[i]) {
4551                                 return -ENODEV;
4552                         }
4553                 }
4554         }
4555         return 0;
4556 }
4557
4558 static int
4559 bnx2_test_memory(struct bnx2 *bp)
4560 {
4561         int ret = 0;
4562         int i;
4563         static struct mem_entry {
4564                 u32   offset;
4565                 u32   len;
4566         } mem_tbl_5706[] = {
4567                 { 0x60000,  0x4000 },
4568                 { 0xa0000,  0x3000 },
4569                 { 0xe0000,  0x4000 },
4570                 { 0x120000, 0x4000 },
4571                 { 0x1a0000, 0x4000 },
4572                 { 0x160000, 0x4000 },
4573                 { 0xffffffff, 0    },
4574         },
4575         mem_tbl_5709[] = {
4576                 { 0x60000,  0x4000 },
4577                 { 0xa0000,  0x3000 },
4578                 { 0xe0000,  0x4000 },
4579                 { 0x120000, 0x4000 },
4580                 { 0x1a0000, 0x4000 },
4581                 { 0xffffffff, 0    },
4582         };
4583         struct mem_entry *mem_tbl;
4584
4585         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4586                 mem_tbl = mem_tbl_5709;
4587         else
4588                 mem_tbl = mem_tbl_5706;
4589
4590         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4591                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4592                         mem_tbl[i].len)) != 0) {
4593                         return ret;
4594                 }
4595         }
4596
4597         return ret;
4598 }
4599
4600 #define BNX2_MAC_LOOPBACK       0
4601 #define BNX2_PHY_LOOPBACK       1
4602
/* Send one self-addressed frame through the selected loopback path
 * (MAC-internal or PHY-internal) and verify it comes back intact on
 * the RX ring.  Assumes the chip was just reset so the rings start
 * empty.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if no
 * skb could be allocated, -ENODEV on any mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size test frame: our own MAC as destination and
	 * a recognizable byte pattern as payload.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update to capture the current RX index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame as a single BD and ring the TX doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been consumed by the transmitter... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match, minus the 4-byte CRC counted by the chip. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4721
4722 #define BNX2_MAC_LOOPBACK_FAILED        1
4723 #define BNX2_PHY_LOOPBACK_FAILED        2
4724 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4725                                          BNX2_PHY_LOOPBACK_FAILED)
4726
4727 static int
4728 bnx2_test_loopback(struct bnx2 *bp)
4729 {
4730         int rc = 0;
4731
4732         if (!netif_running(bp->dev))
4733                 return BNX2_LOOPBACK_FAILED;
4734
4735         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4736         spin_lock_bh(&bp->phy_lock);
4737         bnx2_init_phy(bp);
4738         spin_unlock_bh(&bp->phy_lock);
4739         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4740                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4741         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4742                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4743         return rc;
4744 }
4745
4746 #define NVRAM_SIZE 0x200
4747 #define CRC32_RESIDUAL 0xdebb20e3
4748
4749 static int
4750 bnx2_test_nvram(struct bnx2 *bp)
4751 {
4752         u32 buf[NVRAM_SIZE / 4];
4753         u8 *data = (u8 *) buf;
4754         int rc = 0;
4755         u32 magic, csum;
4756
4757         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4758                 goto test_nvram_done;
4759
4760         magic = be32_to_cpu(buf[0]);
4761         if (magic != 0x669955aa) {
4762                 rc = -ENODEV;
4763                 goto test_nvram_done;
4764         }
4765
4766         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4767                 goto test_nvram_done;
4768
4769         csum = ether_crc_le(0x100, data);
4770         if (csum != CRC32_RESIDUAL) {
4771                 rc = -ENODEV;
4772                 goto test_nvram_done;
4773         }
4774
4775         csum = ether_crc_le(0x100, data + 0x100);
4776         if (csum != CRC32_RESIDUAL) {
4777                 rc = -ENODEV;
4778         }
4779
4780 test_nvram_done:
4781         return rc;
4782 }
4783
4784 static int
4785 bnx2_test_link(struct bnx2 *bp)
4786 {
4787         u32 bmsr;
4788
4789         spin_lock_bh(&bp->phy_lock);
4790         bnx2_enable_bmsr1(bp);
4791         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4792         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4793         bnx2_disable_bmsr1(bp);
4794         spin_unlock_bh(&bp->phy_lock);
4795
4796         if (bmsr & BMSR_LSTATUS) {
4797                 return 0;
4798         }
4799         return -ENODEV;
4800 }
4801
4802 static int
4803 bnx2_test_intr(struct bnx2 *bp)
4804 {
4805         int i;
4806         u16 status_idx;
4807
4808         if (!netif_running(bp->dev))
4809                 return -ENODEV;
4810
4811         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4812
4813         /* This register is not touched during run-time. */
4814         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4815         REG_RD(bp, BNX2_HC_COMMAND);
4816
4817         for (i = 0; i < 10; i++) {
4818                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4819                         status_idx) {
4820
4821                         break;
4822                 }
4823
4824                 msleep_interruptible(10);
4825         }
4826         if (i < 10)
4827                 return 0;
4828
4829         return -ENODEV;
4830 }
4831
/* Periodic SerDes handling for the 5706: implements parallel
 * detection so the link can be forced up against a partner that does
 * not autonegotiate, and re-enables autoneg once the partner starts
 * sending config words again.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific shadow registers; the 0x1c/0x17
			 * writes appear to select status pages for signal
			 * detect and received-config — NOTE(review):
			 * semantics per Broadcom PHY docs, confirm there.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal present but partner is not
				 * autonegotiating: force 1G full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link was forced via parallel detect; if the partner is
		 * now sending config words, go back to autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4886
/* Periodic SerDes handling for the 5708: while autoneg has not
 * brought the link up, alternate between forced 2.5G mode and
 * autoneg so a link can come up against either kind of partner.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Link is managed remotely (firmware/remote PHY); nothing to do. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not succeeded; try forced 2.5G. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not succeed either; return to
			 * autoneg and give it a couple of timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4919
/* Periodic driver timer: sends the keep-alive pulse to the bootcode,
 * refreshes the firmware RX drop counter, and runs the SerDes state
 * machines.  Always re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are held off (reset in progress); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat so the firmware knows the driver is still alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4952
4953 static int
4954 bnx2_request_irq(struct bnx2 *bp)
4955 {
4956         struct net_device *dev = bp->dev;
4957         int rc = 0;
4958
4959         if (bp->flags & USING_MSI_FLAG) {
4960                 irq_handler_t   fn = bnx2_msi;
4961
4962                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4963                         fn = bnx2_msi_1shot;
4964
4965                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4966         } else
4967                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4968                                  IRQF_SHARED, dev->name, dev);
4969         return rc;
4970 }
4971
4972 static void
4973 bnx2_free_irq(struct bnx2 *bp)
4974 {
4975         struct net_device *dev = bp->dev;
4976
4977         if (bp->flags & USING_MSI_FLAG) {
4978                 free_irq(bp->pdev->irq, dev);
4979                 pci_disable_msi(bp->pdev);
4980                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4981         } else
4982                 free_irq(bp->pdev->irq, dev);
4983 }
4984
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI when the device supports it; the 5709 also
	 * supports one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() disables MSI and clears the MSI
			 * flags, so the re-init and re-request below fall
			 * back to INTx.
			 */
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5066
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* Nothing to do if the interface was brought down meanwhile. */
	if (!netif_running(bp->dev))
		return;

	/* in_reset_task is polled by bnx2_close(), which waits for this
	 * work item to finish instead of calling flush_scheduled_work()
	 * (that could deadlock on rtnl_lock — see comment in bnx2_close).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 here; presumably balanced by
	 * the interrupt-enable path reached via bnx2_netif_start() — confirm.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5084
5085 static void
5086 bnx2_tx_timeout(struct net_device *dev)
5087 {
5088         struct bnx2 *bp = netdev_priv(dev);
5089
5090         /* This allows the netif to be shutdown gracefully before resetting */
5091         schedule_work(&bp->reset_task);
5092 }
5093
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device so the RX path never observes a half-updated
	 * vlgrp pointer, then reprogram the RX mode with the new VLAN
	 * settings before restarting.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5109
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* Should never trigger: the queue is stopped at the bottom of this
	 * function whenever at most MAX_SKB_FRAGS descriptors remain.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Ask the chip to compute the TCP/UDP checksum. */
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		/* VLAN tag lives in the upper 16 bits of the BD field. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO/LSO packet: encode MSS plus header-length hints. */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 LSO: the TCP header offset beyond the fixed
			 * IPv6 header is split (in 8-byte units) across
			 * several BD flag fields and the mss field.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 LSO: the headers are rewritten below, so a
			 * cloned header must be privatized first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed per-segment IP length and the TCP pseudo-header
			 * checksum (length 0) for the hardware to complete.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				/* Extra header length, in 32-bit words. */
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part and fill in the first (START) BD. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Order the MMIO writes before releasing netif_tx_lock. */
	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		/* Ring nearly full: stop the queue, then re-check in case
		 * bnx2_tx_int() freed descriptors concurrently.
		 */
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5248
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick a reset code reflecting the Wake-on-LAN configuration;
	 * presumably consumed by firmware via bnx2_reset_chip() to leave
	 * the link in the appropriate state — confirm against bnx2.h.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5280
/* Combine a 64-bit hardware counter, stored as two 32-bit words
 * (<ctr>_hi / <ctr>_lo), into a single unsigned long.
 *
 * Fully parenthesized: the previous form left the '+' exposed, so the
 * macro was unsafe inside any expression with higher-precedence
 * neighbors (e.g. a multiplication).
 */
#define GET_NET_STATS64(ctr)                                    \
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts unsigned long only holds the low word. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5293
/* Fill bp->net_stats from the DMA'ed hardware statistics block and
 * return it.  GET_NET_STATS() truncates 64-bit counters to unsigned
 * long on 32-bit hosts.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block not allocated yet: return the cached copy as-is. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* Aggregate RX error count from the individual categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors reported as 0 on 5706 and 5708 A0;
	 * presumably the counter is unusable there — see chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include firmware-level drops in the missed count. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5369
5370 /* All ethtool functions called with rtnl_lock */
5371
5372 static int
5373 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5374 {
5375         struct bnx2 *bp = netdev_priv(dev);
5376         int support_serdes = 0, support_copper = 0;
5377
5378         cmd->supported = SUPPORTED_Autoneg;
5379         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5380                 support_serdes = 1;
5381                 support_copper = 1;
5382         } else if (bp->phy_port == PORT_FIBRE)
5383                 support_serdes = 1;
5384         else
5385                 support_copper = 1;
5386
5387         if (support_serdes) {
5388                 cmd->supported |= SUPPORTED_1000baseT_Full |
5389                         SUPPORTED_FIBRE;
5390                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5391                         cmd->supported |= SUPPORTED_2500baseX_Full;
5392
5393         }
5394         if (support_copper) {
5395                 cmd->supported |= SUPPORTED_10baseT_Half |
5396                         SUPPORTED_10baseT_Full |
5397                         SUPPORTED_100baseT_Half |
5398                         SUPPORTED_100baseT_Full |
5399                         SUPPORTED_1000baseT_Full |
5400                         SUPPORTED_TP;
5401
5402         }
5403
5404         spin_lock_bh(&bp->phy_lock);
5405         cmd->port = bp->phy_port;
5406         cmd->advertising = bp->advertising;
5407
5408         if (bp->autoneg & AUTONEG_SPEED) {
5409                 cmd->autoneg = AUTONEG_ENABLE;
5410         }
5411         else {
5412                 cmd->autoneg = AUTONEG_DISABLE;
5413         }
5414
5415         if (netif_carrier_ok(dev)) {
5416                 cmd->speed = bp->line_speed;
5417                 cmd->duplex = bp->duplex;
5418         }
5419         else {
5420                 cmd->speed = -1;
5421                 cmd->duplex = -1;
5422         }
5423         spin_unlock_bh(&bp->phy_lock);
5424
5425         cmd->transceiver = XCVR_INTERNAL;
5426         cmd->phy_address = bp->phy_addr;
5427
5428         return 0;
5429 }
5430
/* ethtool set_settings: validate and apply speed/duplex/autoneg.
 * All state changes happen under phy_lock; local copies are validated
 * first so nothing is committed on an error path.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	/* Only TP and FIBRE ports exist on this hardware. */
	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching the port type is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable serdes PHY. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			/* 1000 half-duplex is never supported. */
			goto err_out_unlock;
		else {
			/* Anything else: advertise all supported speeds. */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			/* Gigabit and up cannot be forced on copper. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5513
5514 static void
5515 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5516 {
5517         struct bnx2 *bp = netdev_priv(dev);
5518
5519         strcpy(info->driver, DRV_MODULE_NAME);
5520         strcpy(info->version, DRV_MODULE_VERSION);
5521         strcpy(info->bus_info, pci_name(bp->pdev));
5522         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5523         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5524         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5525         info->fw_version[1] = info->fw_version[3] = '.';
5526         info->fw_version[5] = 0;
5527 }
5528
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* Fixed-size dump regardless of chip variant. */
	return BNX2_REGDUMP_LEN;
}
5536
/* ethtool get_regs: dump readable register ranges into a zeroed
 * BNX2_REGDUMP_LEN buffer, preserving the register offsets as buffer
 * offsets (gaps between ranges stay zero).
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Consecutive pairs of [start, end) register offsets to read;
	 * terminated by the 0x8000 sentinel (>= BNX2_REGDUMP_LEN/4 words).
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Don't touch the chip unless the interface is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE(review): 'p += offset' scales by sizeof(u32); harmless only
	 * because reg_boundaries[0] is 0 — would be wrong otherwise.
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this range: jump to the next one, resyncing the
		 * output pointer with the new register offset.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
5586
5587 static void
5588 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5589 {
5590         struct bnx2 *bp = netdev_priv(dev);
5591
5592         if (bp->flags & NO_WOL_FLAG) {
5593                 wol->supported = 0;
5594                 wol->wolopts = 0;
5595         }
5596         else {
5597                 wol->supported = WAKE_MAGIC;
5598                 if (bp->wol)
5599                         wol->wolopts = WAKE_MAGIC;
5600                 else
5601                         wol->wolopts = 0;
5602         }
5603         memset(&wol->sopass, 0, sizeof(wol->sopass));
5604 }
5605
5606 static int
5607 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5608 {
5609         struct bnx2 *bp = netdev_priv(dev);
5610
5611         if (wol->wolopts & ~WAKE_MAGIC)
5612                 return -EINVAL;
5613
5614         if (wol->wolopts & WAKE_MAGIC) {
5615                 if (bp->flags & NO_WOL_FLAG)
5616                         return -EINVAL;
5617
5618                 bp->wol = 1;
5619         }
5620         else {
5621                 bp->wol = 0;
5622         }
5623         return 0;
5624 }
5625
/* ethtool nway_reset: restart autonegotiation on the PHY. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* Only meaningful when autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remotely-managed PHY: just redo the remote setup. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; 20ms gives the partner
		 * time to see the loopback-induced link drop.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handling in the timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5668
5669 static int
5670 bnx2_get_eeprom_len(struct net_device *dev)
5671 {
5672         struct bnx2 *bp = netdev_priv(dev);
5673
5674         if (bp->flash_info == NULL)
5675                 return 0;
5676
5677         return (int) bp->flash_size;
5678 }
5679
5680 static int
5681 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5682                 u8 *eebuf)
5683 {
5684         struct bnx2 *bp = netdev_priv(dev);
5685         int rc;
5686
5687         /* parameters already validated in ethtool_get_eeprom */
5688
5689         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5690
5691         return rc;
5692 }
5693
5694 static int
5695 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5696                 u8 *eebuf)
5697 {
5698         struct bnx2 *bp = netdev_priv(dev);
5699         int rc;
5700
5701         /* parameters already validated in ethtool_set_eeprom */
5702
5703         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5704
5705         return rc;
5706 }
5707
/* ethtool get_coalesce: report the current coalescing parameters. */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero first so fields this driver doesn't support read as 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5729
5730 static int
5731 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5732 {
5733         struct bnx2 *bp = netdev_priv(dev);
5734
5735         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5736         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5737
5738         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5739         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5740
5741         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5742         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5743
5744         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5745         if (bp->rx_quick_cons_trip_int > 0xff)
5746                 bp->rx_quick_cons_trip_int = 0xff;
5747
5748         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5749         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5750
5751         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5752         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5753
5754         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5755         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5756
5757         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5758         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5759                 0xff;
5760
5761         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5762         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5763                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5764                         bp->stats_ticks = USEC_PER_SEC;
5765         }
5766         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5767         bp->stats_ticks &= 0xffff00;
5768
5769         if (netif_running(bp->dev)) {
5770                 bnx2_netif_stop(bp);
5771                 bnx2_init_nic(bp);
5772                 bnx2_netif_start(bp);
5773         }
5774
5775         return 0;
5776 }
5777
5778 static void
5779 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5780 {
5781         struct bnx2 *bp = netdev_priv(dev);
5782
5783         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5784         ering->rx_mini_max_pending = 0;
5785         ering->rx_jumbo_max_pending = 0;
5786
5787         ering->rx_pending = bp->rx_ring_size;
5788         ering->rx_mini_pending = 0;
5789         ering->rx_jumbo_pending = 0;
5790
5791         ering->tx_max_pending = MAX_TX_DESC_CNT;
5792         ering->tx_pending = bp->tx_ring_size;
5793 }
5794
/* ethtool set_ringparam: resize the RX/TX rings, bouncing the NIC if
 * it is running.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX must keep room for a maximally-fragmented skb plus one BD. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Tear down the existing rings before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			/* NOTE(review): on allocation failure the device is
			 * left stopped with freed rings; the caller gets the
			 * error but the interface is effectively dead —
			 * confirm whether a fallback/close is needed here.
			 */
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5828
5829 static void
5830 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5831 {
5832         struct bnx2 *bp = netdev_priv(dev);
5833
5834         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5835         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5836         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5837 }
5838
5839 static int
5840 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5841 {
5842         struct bnx2 *bp = netdev_priv(dev);
5843
5844         bp->req_flow_ctrl = 0;
5845         if (epause->rx_pause)
5846                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5847         if (epause->tx_pause)
5848                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5849
5850         if (epause->autoneg) {
5851                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5852         }
5853         else {
5854                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5855         }
5856
5857         spin_lock_bh(&bp->phy_lock);
5858
5859         bnx2_setup_phy(bp, bp->phy_port);
5860
5861         spin_unlock_bh(&bp->phy_lock);
5862
5863         return 0;
5864 }
5865
5866 static u32
5867 bnx2_get_rx_csum(struct net_device *dev)
5868 {
5869         struct bnx2 *bp = netdev_priv(dev);
5870
5871         return bp->rx_csum;
5872 }
5873
5874 static int
5875 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5876 {
5877         struct bnx2 *bp = netdev_priv(dev);
5878
5879         bp->rx_csum = data;
5880         return 0;
5881 }
5882
5883 static int
5884 bnx2_set_tso(struct net_device *dev, u32 data)
5885 {
5886         struct bnx2 *bp = netdev_priv(dev);
5887
5888         if (data) {
5889                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5890                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5891                         dev->features |= NETIF_F_TSO6;
5892         } else
5893                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5894                                    NETIF_F_TSO_ECN);
5895         return 0;
5896 }
5897
/* Number of entries in bnx2_stats_str_arr; must stay in sync with the
 * bnx2_stats_offset_arr table that follows (same order, same count).
 */
#define BNX2_NUM_STATS 46

/* ethtool -S statistic names, indexed identically to
 * bnx2_stats_offset_arr.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5950
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offsets into the hardware statistics block, one entry per
 * counter in bnx2_stats_str_arr (same order).  For 8-byte counters the
 * offset names the _hi word; bnx2_get_ethtool_stats() reads the _lo
 * word at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6001
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (0 = counter skipped) for 5706 A0-A2 and
 * 5708 A0 chips; indexed in lock-step with bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6012
/* Per-counter width in bytes for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6020
#define BNX2_NUM_TESTS 6

/* Self-test names reported via bnx2_get_strings(ETH_SS_TEST).  The
 * order must match the buf[] indices filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6033
/* ethtool self_test_count: number of entries bnx2_self_test() reports. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
6039
/* ethtool self_test: run the diagnostics named in bnx2_tests_str_arr.
 * buf[i] corresponds to entry i of that table; non-zero means failure.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access: quiesce the NIC,
		 * put the chip in diagnostic mode and drop all buffers.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* The loopback result itself is stored in buf[2]. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the offline tests. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (at most ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* The remaining tests are marked "online" in bnx2_tests_str_arr
	 * and run in either case.
	 */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6095
6096 static void
6097 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6098 {
6099         switch (stringset) {
6100         case ETH_SS_STATS:
6101                 memcpy(buf, bnx2_stats_str_arr,
6102                         sizeof(bnx2_stats_str_arr));
6103                 break;
6104         case ETH_SS_TEST:
6105                 memcpy(buf, bnx2_tests_str_arr,
6106                         sizeof(bnx2_tests_str_arr));
6107                 break;
6108         }
6109 }
6110
/* ethtool get_stats_count: number of counters in bnx2_stats_str_arr. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
6116
6117 static void
6118 bnx2_get_ethtool_stats(struct net_device *dev,
6119                 struct ethtool_stats *stats, u64 *buf)
6120 {
6121         struct bnx2 *bp = netdev_priv(dev);
6122         int i;
6123         u32 *hw_stats = (u32 *) bp->stats_blk;
6124         u8 *stats_len_arr = NULL;
6125
6126         if (hw_stats == NULL) {
6127                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6128                 return;
6129         }
6130
6131         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6132             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6133             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6134             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6135                 stats_len_arr = bnx2_5706_stats_len_arr;
6136         else
6137                 stats_len_arr = bnx2_5708_stats_len_arr;
6138
6139         for (i = 0; i < BNX2_NUM_STATS; i++) {
6140                 if (stats_len_arr[i] == 0) {
6141                         /* skip this counter */
6142                         buf[i] = 0;
6143                         continue;
6144                 }
6145                 if (stats_len_arr[i] == 4) {
6146                         /* 4-byte counter */
6147                         buf[i] = (u64)
6148                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6149                         continue;
6150                 }
6151                 /* 8-byte counter */
6152                 buf[i] = (((u64) *(hw_stats +
6153                                         bnx2_stats_offset_arr[i])) << 32) +
6154                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6155         }
6156 }
6157
6158 static int
6159 bnx2_phys_id(struct net_device *dev, u32 data)
6160 {
6161         struct bnx2 *bp = netdev_priv(dev);
6162         int i;
6163         u32 save;
6164
6165         if (data == 0)
6166                 data = 2;
6167
6168         save = REG_RD(bp, BNX2_MISC_CFG);
6169         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6170
6171         for (i = 0; i < (data * 2); i++) {
6172                 if ((i % 2) == 0) {
6173                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6174                 }
6175                 else {
6176                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6177                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6178                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6179                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6180                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6181                                 BNX2_EMAC_LED_TRAFFIC);
6182                 }
6183                 msleep_interruptible(500);
6184                 if (signal_pending(current))
6185                         break;
6186         }
6187         REG_WR(bp, BNX2_EMAC_LED, 0);
6188         REG_WR(bp, BNX2_MISC_CFG, save);
6189         return 0;
6190 }
6191
6192 static int
6193 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6194 {
6195         struct bnx2 *bp = netdev_priv(dev);
6196
6197         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6198                 return (ethtool_op_set_tx_hw_csum(dev, data));
6199         else
6200                 return (ethtool_op_set_tx_csum(dev, data));
6201 }
6202
/* ethtool entry points for bnx2 devices; registered in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
6238
/* Called with rtnl_lock */
/* MII register access ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG);
 * all other commands return -EOPNOTSUPP.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Direct MII access is refused when the PHY is under
		 * remote firmware control.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the rest of the
		 * driver.
		 */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY register writes require admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6292
6293 /* Called with rtnl_lock */
6294 static int
6295 bnx2_change_mac_addr(struct net_device *dev, void *p)
6296 {
6297         struct sockaddr *addr = p;
6298         struct bnx2 *bp = netdev_priv(dev);
6299
6300         if (!is_valid_ether_addr(addr->sa_data))
6301                 return -EINVAL;
6302
6303         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6304         if (netif_running(dev))
6305                 bnx2_set_mac_addr(bp);
6306
6307         return 0;
6308 }
6309
6310 /* Called with rtnl_lock */
6311 static int
6312 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6313 {
6314         struct bnx2 *bp = netdev_priv(dev);
6315
6316         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6317                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6318                 return -EINVAL;
6319
6320         dev->mtu = new_mtu;
6321         if (netif_running(dev)) {
6322                 bnx2_netif_stop(bp);
6323
6324                 bnx2_init_nic(bp);
6325
6326                 bnx2_netif_start(bp);
6327         }
6328         return 0;
6329 }
6330
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler directly with the device
 * IRQ line masked.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6342
6343 static void __devinit
6344 bnx2_get_5709_media(struct bnx2 *bp)
6345 {
6346         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6347         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6348         u32 strap;
6349
6350         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6351                 return;
6352         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6353                 bp->phy_flags |= PHY_SERDES_FLAG;
6354                 return;
6355         }
6356
6357         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6358                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6359         else
6360                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6361
6362         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6363                 switch (strap) {
6364                 case 0x4:
6365                 case 0x5:
6366                 case 0x6:
6367                         bp->phy_flags |= PHY_SERDES_FLAG;
6368                         return;
6369                 }
6370         } else {
6371                 switch (strap) {
6372                 case 0x1:
6373                 case 0x2:
6374                 case 0x4:
6375                         bp->phy_flags |= PHY_SERDES_FLAG;
6376                         return;
6377                 }
6378         }
6379 }
6380
/* Detect conventional PCI vs. PCI-X, the bus clock speed and the bus
 * width from the chip's MISC_STATUS/CLOCK_CONTROL registers; results
 * go into bp->flags and bp->bus_speed_mhz (printed by
 * bnx2_bus_string()).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* In PCI-X mode, read the detected clock speed. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: speed follows the M66EN pin. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6432
6433 static int __devinit
6434 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6435 {
6436         struct bnx2 *bp;
6437         unsigned long mem_len;
6438         int rc;
6439         u32 reg;
6440         u64 dma_mask, persist_dma_mask;
6441
6442         SET_MODULE_OWNER(dev);
6443         SET_NETDEV_DEV(dev, &pdev->dev);
6444         bp = netdev_priv(dev);
6445
6446         bp->flags = 0;
6447         bp->phy_flags = 0;
6448
6449         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6450         rc = pci_enable_device(pdev);
6451         if (rc) {
6452                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6453                 goto err_out;
6454         }
6455
6456         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6457                 dev_err(&pdev->dev,
6458                         "Cannot find PCI device base address, aborting.\n");
6459                 rc = -ENODEV;
6460                 goto err_out_disable;
6461         }
6462
6463         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6464         if (rc) {
6465                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6466                 goto err_out_disable;
6467         }
6468
6469         pci_set_master(pdev);
6470
6471         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6472         if (bp->pm_cap == 0) {
6473                 dev_err(&pdev->dev,
6474                         "Cannot find power management capability, aborting.\n");
6475                 rc = -EIO;
6476                 goto err_out_release;
6477         }
6478
6479         bp->dev = dev;
6480         bp->pdev = pdev;
6481
6482         spin_lock_init(&bp->phy_lock);
6483         spin_lock_init(&bp->indirect_lock);
6484         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6485
6486         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6487         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6488         dev->mem_end = dev->mem_start + mem_len;
6489         dev->irq = pdev->irq;
6490
6491         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6492
6493         if (!bp->regview) {
6494                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6495                 rc = -ENOMEM;
6496                 goto err_out_release;
6497         }
6498
6499         /* Configure byte swap and enable write to the reg_window registers.
6500          * Rely on CPU to do target byte swapping on big endian systems
6501          * The chip's target access swapping will not swap all accesses
6502          */
6503         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6504                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6505                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6506
6507         bnx2_set_power_state(bp, PCI_D0);
6508
6509         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6510
6511         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6512                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6513                         dev_err(&pdev->dev,
6514                                 "Cannot find PCIE capability, aborting.\n");
6515                         rc = -EIO;
6516                         goto err_out_unmap;
6517                 }
6518                 bp->flags |= PCIE_FLAG;
6519         } else {
6520                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6521                 if (bp->pcix_cap == 0) {
6522                         dev_err(&pdev->dev,
6523                                 "Cannot find PCIX capability, aborting.\n");
6524                         rc = -EIO;
6525                         goto err_out_unmap;
6526                 }
6527         }
6528
6529         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6530                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6531                         bp->flags |= MSI_CAP_FLAG;
6532         }
6533
6534         /* 5708 cannot support DMA addresses > 40-bit.  */
6535         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6536                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6537         else
6538                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6539
6540         /* Configure DMA attributes. */
6541         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6542                 dev->features |= NETIF_F_HIGHDMA;
6543                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6544                 if (rc) {
6545                         dev_err(&pdev->dev,
6546                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6547                         goto err_out_unmap;
6548                 }
6549         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6550                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6551                 goto err_out_unmap;
6552         }
6553
6554         if (!(bp->flags & PCIE_FLAG))
6555                 bnx2_get_pci_speed(bp);
6556
6557         /* 5706A0 may falsely detect SERR and PERR. */
6558         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6559                 reg = REG_RD(bp, PCI_COMMAND);
6560                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6561                 REG_WR(bp, PCI_COMMAND, reg);
6562         }
6563         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6564                 !(bp->flags & PCIX_FLAG)) {
6565
6566                 dev_err(&pdev->dev,
6567                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6568                 goto err_out_unmap;
6569         }
6570
6571         bnx2_init_nvram(bp);
6572
6573         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6574
6575         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6576             BNX2_SHM_HDR_SIGNATURE_SIG) {
6577                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6578
6579                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6580         } else
6581                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6582
6583         /* Get the permanent MAC address.  First we need to make sure the
6584          * firmware is actually running.
6585          */
6586         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6587
6588         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6589             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6590                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6591                 rc = -ENODEV;
6592                 goto err_out_unmap;
6593         }
6594
6595         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6596
6597         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6598         bp->mac_addr[0] = (u8) (reg >> 8);
6599         bp->mac_addr[1] = (u8) reg;
6600
6601         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6602         bp->mac_addr[2] = (u8) (reg >> 24);
6603         bp->mac_addr[3] = (u8) (reg >> 16);
6604         bp->mac_addr[4] = (u8) (reg >> 8);
6605         bp->mac_addr[5] = (u8) reg;
6606
6607         bp->tx_ring_size = MAX_TX_DESC_CNT;
6608         bnx2_set_rx_ring_size(bp, 255);
6609
6610         bp->rx_csum = 1;
6611
6612         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6613
6614         bp->tx_quick_cons_trip_int = 20;
6615         bp->tx_quick_cons_trip = 20;
6616         bp->tx_ticks_int = 80;
6617         bp->tx_ticks = 80;
6618
6619         bp->rx_quick_cons_trip_int = 6;
6620         bp->rx_quick_cons_trip = 6;
6621         bp->rx_ticks_int = 18;
6622         bp->rx_ticks = 18;
6623
6624         bp->stats_ticks = 1000000 & 0xffff00;
6625
6626         bp->timer_interval =  HZ;
6627         bp->current_interval =  HZ;
6628
6629         bp->phy_addr = 1;
6630
6631         /* Disable WOL support if we are running on a SERDES chip. */
6632         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6633                 bnx2_get_5709_media(bp);
6634         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6635                 bp->phy_flags |= PHY_SERDES_FLAG;
6636
6637         bp->phy_port = PORT_TP;
6638         if (bp->phy_flags & PHY_SERDES_FLAG) {
6639                 bp->phy_port = PORT_FIBRE;
6640                 bp->flags |= NO_WOL_FLAG;
6641                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6642                         bp->phy_addr = 2;
6643                         reg = REG_RD_IND(bp, bp->shmem_base +
6644                                          BNX2_SHARED_HW_CFG_CONFIG);
6645                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6646                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6647                 }
6648                 bnx2_init_remote_phy(bp);
6649
6650         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6651                    CHIP_NUM(bp) == CHIP_NUM_5708)
6652                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6653         else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6654                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6655
6656         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6657             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6658             (CHIP_ID(bp) == CHIP_ID_5708_B1))
6659                 bp->flags |= NO_WOL_FLAG;
6660
6661         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6662                 bp->tx_quick_cons_trip_int =
6663                         bp->tx_quick_cons_trip;
6664                 bp->tx_ticks_int = bp->tx_ticks;
6665                 bp->rx_quick_cons_trip_int =
6666                         bp->rx_quick_cons_trip;
6667                 bp->rx_ticks_int = bp->rx_ticks;
6668                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6669                 bp->com_ticks_int = bp->com_ticks;
6670                 bp->cmd_ticks_int = bp->cmd_ticks;
6671         }
6672
6673         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6674          *
6675          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6676          * with byte enables disabled on the unused 32-bit word.  This is legal
6677          * but causes problems on the AMD 8132 which will eventually stop
6678          * responding after a while.
6679          *
6680          * AMD believes this incompatibility is unique to the 5706, and
6681          * prefers to locally disable MSI rather than globally disabling it.
6682          */
6683         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6684                 struct pci_dev *amd_8132 = NULL;
6685
6686                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6687                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6688                                                   amd_8132))) {
6689                         u8 rev;
6690
6691                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6692                         if (rev >= 0x10 && rev <= 0x13) {
6693                                 disable_msi = 1;
6694                                 pci_dev_put(amd_8132);
6695                                 break;
6696                         }
6697                 }
6698         }
6699
6700         bnx2_set_default_link(bp);
6701         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6702
6703         init_timer(&bp->timer);
6704         bp->timer.expires = RUN_AT(bp->timer_interval);
6705         bp->timer.data = (unsigned long) bp;
6706         bp->timer.function = bnx2_timer;
6707
6708         return 0;
6709
6710 err_out_unmap:
6711         if (bp->regview) {
6712                 iounmap(bp->regview);
6713                 bp->regview = NULL;
6714         }
6715
6716 err_out_release:
6717         pci_release_regions(pdev);
6718
6719 err_out_disable:
6720         pci_disable_device(pdev);
6721         pci_set_drvdata(pdev, NULL);
6722
6723 err_out:
6724         return rc;
6725 }
6726
6727 static char * __devinit
6728 bnx2_bus_string(struct bnx2 *bp, char *str)
6729 {
6730         char *s = str;
6731
6732         if (bp->flags & PCIE_FLAG) {
6733                 s += sprintf(s, "PCI Express");
6734         } else {
6735                 s += sprintf(s, "PCI");
6736                 if (bp->flags & PCIX_FLAG)
6737                         s += sprintf(s, "-X");
6738                 if (bp->flags & PCI_32BIT_FLAG)
6739                         s += sprintf(s, " 32-bit");
6740                 else
6741                         s += sprintf(s, " 64-bit");
6742                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6743         }
6744         return str;
6745 }
6746
/* PCI probe routine.  Allocates the net_device, initializes the board
 * via bnx2_init_board(), fills in the net_device method table and
 * feature flags, registers the device, and prints a one-line summary.
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	/* Print the driver banner only for the first device probed. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board cleaned up after itself; only the
		 * net_device allocation remains to undo here. */
		free_netdev(dev);
		return rc;
	}

	/* Wire up the net_device operations. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from NVRAM by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* Only the 5709 supports IPv6 checksum offload here. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		/* Registration failed: unwind everything bnx2_init_board
		 * set up, in reverse order. */
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	/* Summary line, e.g. "eth0: ... (A2) PCI-X 64-bit 133MHz found
	 * at mem f8000000, IRQ 16"; chip stepping is decoded from
	 * CHIP_ID into a letter+digit pair. */
	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	/* Append the station MAC address to the same log line. */
	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6839
/* PCI remove routine.  Tears down what bnx2_init_one() set up; the
 * ordering matters: deferred work is flushed first, the netdev is
 * unregistered before its memory and the PCI resources are released.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6858
6859 static int
6860 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6861 {
6862         struct net_device *dev = pci_get_drvdata(pdev);
6863         struct bnx2 *bp = netdev_priv(dev);
6864         u32 reset_code;
6865
6866         if (!netif_running(dev))
6867                 return 0;
6868
6869         flush_scheduled_work();
6870         bnx2_netif_stop(bp);
6871         netif_device_detach(dev);
6872         del_timer_sync(&bp->timer);
6873         if (bp->flags & NO_WOL_FLAG)
6874                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6875         else if (bp->wol)
6876                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6877         else
6878                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6879         bnx2_reset_chip(bp, reset_code);
6880         bnx2_free_skbs(bp);
6881         pci_save_state(pdev);
6882         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6883         return 0;
6884 }
6885
6886 static int
6887 bnx2_resume(struct pci_dev *pdev)
6888 {
6889         struct net_device *dev = pci_get_drvdata(pdev);
6890         struct bnx2 *bp = netdev_priv(dev);
6891
6892         if (!netif_running(dev))
6893                 return 0;
6894
6895         pci_restore_state(pdev);
6896         bnx2_set_power_state(bp, PCI_D0);
6897         netif_device_attach(dev);
6898         bnx2_init_nic(bp);
6899         bnx2_netif_start(bp);
6900         return 0;
6901 }
6902
/* PCI driver glue: probe/remove and power-management entry points
 * for all device IDs listed in bnx2_pci_tbl. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6911
6912 static int __init bnx2_init(void)
6913 {
6914         return pci_register_driver(&bnx2_pci_driver);
6915 }
6916
/* Module exit point: unregister the driver; the PCI core calls
 * bnx2_remove_one() for every device still bound. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6921
/* Hook module load/unload to the init and cleanup routines above. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6924
6925
6926