bnx2: Use one handler for all MSI-X vectors.
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Work buffer size for firmware image handling (zlib is included above;
 * presumably this is the decompression buffer — confirm against the
 * firmware load path).
 */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.6"
#define DRV_MODULE_RELDATE      "May 16, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: load with disable_msi=1 to force legacy INTx
 * interrupts instead of MSI.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index; used as the driver_data field of bnx2_pci_tbl[] and as
 * the index into board_info[] below.  Keep the two tables in sync.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* Human-readable adapter names, indexed by board_t above.  Order must
 * match the board_t enumeration exactly.
 */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI IDs claimed by this driver.  HP OEM variants (subvendor HP with a
 * specific subdevice ID) must appear before the PCI_ANY_ID catch-all
 * entries for the same device so they match first.  The last field is a
 * board_t used to look up the name in board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Supported NVRAM devices.  The first five words of each entry are raw
 * controller configuration values (presumably strapping match word plus
 * NVRAM config/write registers — see struct flash_spec in bnx2.h for
 * the field names); the remaining fields give flags, page geometry,
 * byte-address mask, total size, and a name for log messages.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* 5709 chips have a single fixed NVRAM configuration, so they use this
 * entry directly instead of probing flash_table[].
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
226
/* Export the PCI ID table so userspace tools can autoload this module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = txr->tx_prod - txr->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a register through the PCI config-space indirect window.
 * indirect_lock serializes the address-write/data-read pair against
 * other indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
258
/* Write a register through the PCI config-space indirect window.
 * indirect_lock serializes the address/data write pair.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
/* Write a word into the firmware shared-memory region at @offset. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read PHY register @reg into *@val over the EMAC MDIO interface.
 *
 * If the MAC is auto-polling the PHY, polling is paused first (and
 * restored at the end) so our manual transaction doesn't collide with
 * the hardware's.  The transaction is polled for up to 50 * 10us; on
 * timeout *@val is zeroed and -EBUSY is returned, otherwise 0.
 *
 * NOTE(review): no lock is taken here; presumably callers serialize
 * MDIO access externally — confirm before adding new call sites.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Pause hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO read command: PHY address in bits 25:21,
         * register in bits 20:16, START_BUSY kicks off the transaction.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion: up to 50 iterations, 10us apart. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Transaction never completed. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
361
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): hardware auto-polling is paused around the
 * manual transaction, completion is polled for up to 50 * 10us, and
 * -EBUSY is returned on timeout, 0 on success.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Pause hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose the MDIO write command; data occupies the low bits. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion: up to 50 iterations, 10us apart. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
425 static void
426 bnx2_enable_int(struct bnx2 *bp)
427 {
428         int i;
429         struct bnx2_napi *bnapi;
430
431         for (i = 0; i < bp->irq_nvecs; i++) {
432                 bnapi = &bp->bnx2_napi[i];
433
434                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437                        bnapi->last_status_idx);
438
439                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441                        bnapi->last_status_idx);
442         }
443         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
444 }
445
/* Disable device interrupts and wait for in-flight handlers to finish.
 *
 * intr_sem is raised first so any handler that does run sees interrupts
 * as logically disabled; bnx2_netif_start() drops it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
/* Quiesce the interface: disable interrupts (synchronously), stop NAPI
 * and the TX queue.  Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
485
486 static void
487 bnx2_netif_start(struct bnx2 *bp)
488 {
489         if (atomic_dec_and_test(&bp->intr_sem)) {
490                 if (netif_running(bp->dev)) {
491                         netif_wake_queue(bp->dev);
492                         bnx2_napi_enable(bp);
493                         bnx2_enable_int(bp);
494                 }
495         }
496 }
497
498 static void
499 bnx2_free_tx_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->num_tx_rings; i++) {
504                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
505                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
506
507                 if (txr->tx_desc_ring) {
508                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
509                                             txr->tx_desc_ring,
510                                             txr->tx_desc_mapping);
511                         txr->tx_desc_ring = NULL;
512                 }
513                 kfree(txr->tx_buf_ring);
514                 txr->tx_buf_ring = NULL;
515         }
516 }
517
518 static void
519 bnx2_free_rx_mem(struct bnx2 *bp)
520 {
521         int i;
522
523         for (i = 0; i < bp->num_rx_rings; i++) {
524                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
525                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
526                 int j;
527
528                 for (j = 0; j < bp->rx_max_ring; j++) {
529                         if (rxr->rx_desc_ring[j])
530                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
531                                                     rxr->rx_desc_ring[j],
532                                                     rxr->rx_desc_mapping[j]);
533                         rxr->rx_desc_ring[j] = NULL;
534                 }
535                 if (rxr->rx_buf_ring)
536                         vfree(rxr->rx_buf_ring);
537                 rxr->rx_buf_ring = NULL;
538
539                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
540                         if (rxr->rx_pg_desc_ring[j])
541                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
542                                                     rxr->rx_pg_desc_ring[i],
543                                                     rxr->rx_pg_desc_mapping[i]);
544                         rxr->rx_pg_desc_ring[i] = NULL;
545                 }
546                 if (rxr->rx_pg_ring)
547                         vfree(rxr->rx_pg_ring);
548                 rxr->rx_pg_ring = NULL;
549         }
550 }
551
552 static int
553 bnx2_alloc_tx_mem(struct bnx2 *bp)
554 {
555         int i;
556
557         for (i = 0; i < bp->num_tx_rings; i++) {
558                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
559                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
560
561                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
562                 if (txr->tx_buf_ring == NULL)
563                         return -ENOMEM;
564
565                 txr->tx_desc_ring =
566                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
567                                              &txr->tx_desc_mapping);
568                 if (txr->tx_desc_ring == NULL)
569                         return -ENOMEM;
570         }
571         return 0;
572 }
573
574 static int
575 bnx2_alloc_rx_mem(struct bnx2 *bp)
576 {
577         int i;
578
579         for (i = 0; i < bp->num_rx_rings; i++) {
580                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
581                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
582                 int j;
583
584                 rxr->rx_buf_ring =
585                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
586                 if (rxr->rx_buf_ring == NULL)
587                         return -ENOMEM;
588
589                 memset(rxr->rx_buf_ring, 0,
590                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
591
592                 for (j = 0; j < bp->rx_max_ring; j++) {
593                         rxr->rx_desc_ring[j] =
594                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
595                                                      &rxr->rx_desc_mapping[j]);
596                         if (rxr->rx_desc_ring[j] == NULL)
597                                 return -ENOMEM;
598
599                 }
600
601                 if (bp->rx_pg_ring_size) {
602                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
603                                                   bp->rx_max_pg_ring);
604                         if (rxr->rx_pg_ring == NULL)
605                                 return -ENOMEM;
606
607                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
608                                bp->rx_max_pg_ring);
609                 }
610
611                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
612                         rxr->rx_pg_desc_ring[j] =
613                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
614                                                 &rxr->rx_pg_desc_mapping[j]);
615                         if (rxr->rx_pg_desc_ring[j] == NULL)
616                                 return -ENOMEM;
617
618                 }
619         }
620         return 0;
621 }
622
/* Free everything bnx2_alloc_mem() allocated: TX/RX rings, 5709 context
 * pages, and the combined status+statistics block.  Idempotent; used as
 * the error-unwind path for bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics blocks share one allocation; freeing the
         * status block releases both (stats_blk points into it).
         */
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
648
/* Allocate all device memory: the combined status+statistics block
 * (carved into per-vector MSI-X status blocks when the chip supports
 * MSI-X), 5709 context pages, and the RX/TX rings.  Returns 0 or
 * -ENOMEM; on failure everything allocated so far is freed.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* One aligned status block slice per hardware vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block... */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* ...the remaining vectors each get an aligned slice. */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector number, positioned for INT_ACK_CMD. */
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs host-resident context memory (8KB total). */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
725
/* Report the current link state (speed/duplex/autoneg) to the firmware
 * through the shared-memory BNX2_LINK_STATUS word.  Skipped entirely
 * when a remote PHY owns link management.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice; presumably it is latched
                         * and the first read returns stale state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
784
785 static char *
786 bnx2_xceiver_str(struct bnx2 *bp)
787 {
788         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
789                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
790                  "Copper"));
791 }
792
/* Log the link state to the console, update the carrier flag, and push
 * the state to firmware via bnx2_report_fw_link().
 *
 * The unprefixed printk() calls are continuations of the first line;
 * keep them together when editing.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
829
830 static void
831 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
832 {
833         u32 local_adv, remote_adv;
834
835         bp->flow_ctrl = 0;
836         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
837                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
838
839                 if (bp->duplex == DUPLEX_FULL) {
840                         bp->flow_ctrl = bp->req_flow_ctrl;
841                 }
842                 return;
843         }
844
845         if (bp->duplex != DUPLEX_FULL) {
846                 return;
847         }
848
849         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
850             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
851                 u32 val;
852
853                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
854                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
855                         bp->flow_ctrl |= FLOW_CTRL_TX;
856                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
857                         bp->flow_ctrl |= FLOW_CTRL_RX;
858                 return;
859         }
860
861         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
862         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
863
864         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
865                 u32 new_local_adv = 0;
866                 u32 new_remote_adv = 0;
867
868                 if (local_adv & ADVERTISE_1000XPAUSE)
869                         new_local_adv |= ADVERTISE_PAUSE_CAP;
870                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
871                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
872                 if (remote_adv & ADVERTISE_1000XPAUSE)
873                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
874                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
875                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
876
877                 local_adv = new_local_adv;
878                 remote_adv = new_remote_adv;
879         }
880
881         /* See Table 28B-3 of 802.3ab-1999 spec. */
882         if (local_adv & ADVERTISE_PAUSE_CAP) {
883                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
884                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
885                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
886                         }
887                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
888                                 bp->flow_ctrl = FLOW_CTRL_RX;
889                         }
890                 }
891                 else {
892                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
893                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
894                         }
895                 }
896         }
897         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
898                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
899                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
900
901                         bp->flow_ctrl = FLOW_CTRL_TX;
902                 }
903         }
904 }
905
906 static int
907 bnx2_5709s_linkup(struct bnx2 *bp)
908 {
909         u32 val, speed;
910
911         bp->link_up = 1;
912
913         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
914         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
915         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
916
917         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
918                 bp->line_speed = bp->req_line_speed;
919                 bp->duplex = bp->req_duplex;
920                 return 0;
921         }
922         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
923         switch (speed) {
924                 case MII_BNX2_GP_TOP_AN_SPEED_10:
925                         bp->line_speed = SPEED_10;
926                         break;
927                 case MII_BNX2_GP_TOP_AN_SPEED_100:
928                         bp->line_speed = SPEED_100;
929                         break;
930                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
931                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
932                         bp->line_speed = SPEED_1000;
933                         break;
934                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
935                         bp->line_speed = SPEED_2500;
936                         break;
937         }
938         if (val & MII_BNX2_GP_TOP_AN_FD)
939                 bp->duplex = DUPLEX_FULL;
940         else
941                 bp->duplex = DUPLEX_HALF;
942         return 0;
943 }
944
945 static int
946 bnx2_5708s_linkup(struct bnx2 *bp)
947 {
948         u32 val;
949
950         bp->link_up = 1;
951         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
952         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
953                 case BCM5708S_1000X_STAT1_SPEED_10:
954                         bp->line_speed = SPEED_10;
955                         break;
956                 case BCM5708S_1000X_STAT1_SPEED_100:
957                         bp->line_speed = SPEED_100;
958                         break;
959                 case BCM5708S_1000X_STAT1_SPEED_1G:
960                         bp->line_speed = SPEED_1000;
961                         break;
962                 case BCM5708S_1000X_STAT1_SPEED_2G5:
963                         bp->line_speed = SPEED_2500;
964                         break;
965         }
966         if (val & BCM5708S_1000X_STAT1_FD)
967                 bp->duplex = DUPLEX_FULL;
968         else
969                 bp->duplex = DUPLEX_HALF;
970
971         return 0;
972 }
973
974 static int
975 bnx2_5706s_linkup(struct bnx2 *bp)
976 {
977         u32 bmcr, local_adv, remote_adv, common;
978
979         bp->link_up = 1;
980         bp->line_speed = SPEED_1000;
981
982         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
983         if (bmcr & BMCR_FULLDPLX) {
984                 bp->duplex = DUPLEX_FULL;
985         }
986         else {
987                 bp->duplex = DUPLEX_HALF;
988         }
989
990         if (!(bmcr & BMCR_ANENABLE)) {
991                 return 0;
992         }
993
994         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
995         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
996
997         common = local_adv & remote_adv;
998         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
999
1000                 if (common & ADVERTISE_1000XFULL) {
1001                         bp->duplex = DUPLEX_FULL;
1002                 }
1003                 else {
1004                         bp->duplex = DUPLEX_HALF;
1005                 }
1006         }
1007
1008         return 0;
1009 }
1010
1011 static int
1012 bnx2_copper_linkup(struct bnx2 *bp)
1013 {
1014         u32 bmcr;
1015
1016         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1017         if (bmcr & BMCR_ANENABLE) {
1018                 u32 local_adv, remote_adv, common;
1019
1020                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1021                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1022
1023                 common = local_adv & (remote_adv >> 2);
1024                 if (common & ADVERTISE_1000FULL) {
1025                         bp->line_speed = SPEED_1000;
1026                         bp->duplex = DUPLEX_FULL;
1027                 }
1028                 else if (common & ADVERTISE_1000HALF) {
1029                         bp->line_speed = SPEED_1000;
1030                         bp->duplex = DUPLEX_HALF;
1031                 }
1032                 else {
1033                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1034                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1035
1036                         common = local_adv & remote_adv;
1037                         if (common & ADVERTISE_100FULL) {
1038                                 bp->line_speed = SPEED_100;
1039                                 bp->duplex = DUPLEX_FULL;
1040                         }
1041                         else if (common & ADVERTISE_100HALF) {
1042                                 bp->line_speed = SPEED_100;
1043                                 bp->duplex = DUPLEX_HALF;
1044                         }
1045                         else if (common & ADVERTISE_10FULL) {
1046                                 bp->line_speed = SPEED_10;
1047                                 bp->duplex = DUPLEX_FULL;
1048                         }
1049                         else if (common & ADVERTISE_10HALF) {
1050                                 bp->line_speed = SPEED_10;
1051                                 bp->duplex = DUPLEX_HALF;
1052                         }
1053                         else {
1054                                 bp->line_speed = 0;
1055                                 bp->link_up = 0;
1056                         }
1057                 }
1058         }
1059         else {
1060                 if (bmcr & BMCR_SPEED100) {
1061                         bp->line_speed = SPEED_100;
1062                 }
1063                 else {
1064                         bp->line_speed = SPEED_10;
1065                 }
1066                 if (bmcr & BMCR_FULLDPLX) {
1067                         bp->duplex = DUPLEX_FULL;
1068                 }
1069                 else {
1070                         bp->duplex = DUPLEX_HALF;
1071                 }
1072         }
1073
1074         return 0;
1075 }
1076
1077 static void
1078 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1079 {
1080         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1081
1082         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1083         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1084         val |= 0x02 << 8;
1085
1086         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1087                 u32 lo_water, hi_water;
1088
1089                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1090                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1091                 else
1092                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1093                 if (lo_water >= bp->rx_ring_size)
1094                         lo_water = 0;
1095
1096                 hi_water = bp->rx_ring_size / 4;
1097
1098                 if (hi_water <= lo_water)
1099                         lo_water = 0;
1100
1101                 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1102                 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1103
1104                 if (hi_water > 0xf)
1105                         hi_water = 0xf;
1106                 else if (hi_water == 0)
1107                         lo_water = 0;
1108                 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1109         }
1110         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1111 }
1112
1113 static void
1114 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1115 {
1116         int i;
1117         u32 cid;
1118
1119         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1120                 if (i == 1)
1121                         cid = RX_RSS_CID;
1122                 bnx2_init_rx_context(bp, cid);
1123         }
1124 }
1125
1126 static int
1127 bnx2_set_mac_link(struct bnx2 *bp)
1128 {
1129         u32 val;
1130
1131         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1132         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1133                 (bp->duplex == DUPLEX_HALF)) {
1134                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1135         }
1136
1137         /* Configure the EMAC mode register. */
1138         val = REG_RD(bp, BNX2_EMAC_MODE);
1139
1140         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1141                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1142                 BNX2_EMAC_MODE_25G_MODE);
1143
1144         if (bp->link_up) {
1145                 switch (bp->line_speed) {
1146                         case SPEED_10:
1147                                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1148                                         val |= BNX2_EMAC_MODE_PORT_MII_10M;
1149                                         break;
1150                                 }
1151                                 /* fall through */
1152                         case SPEED_100:
1153                                 val |= BNX2_EMAC_MODE_PORT_MII;
1154                                 break;
1155                         case SPEED_2500:
1156                                 val |= BNX2_EMAC_MODE_25G_MODE;
1157                                 /* fall through */
1158                         case SPEED_1000:
1159                                 val |= BNX2_EMAC_MODE_PORT_GMII;
1160                                 break;
1161                 }
1162         }
1163         else {
1164                 val |= BNX2_EMAC_MODE_PORT_GMII;
1165         }
1166
1167         /* Set the MAC to operate in the appropriate duplex mode. */
1168         if (bp->duplex == DUPLEX_HALF)
1169                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1170         REG_WR(bp, BNX2_EMAC_MODE, val);
1171
1172         /* Enable/disable rx PAUSE. */
1173         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1174
1175         if (bp->flow_ctrl & FLOW_CTRL_RX)
1176                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1177         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1178
1179         /* Enable/disable tx PAUSE. */
1180         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1181         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1182
1183         if (bp->flow_ctrl & FLOW_CTRL_TX)
1184                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1185         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1186
1187         /* Acknowledge the interrupt. */
1188         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1189
1190         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1191                 bnx2_init_all_rx_contexts(bp);
1192
1193         return 0;
1194 }
1195
1196 static void
1197 bnx2_enable_bmsr1(struct bnx2 *bp)
1198 {
1199         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1200             (CHIP_NUM(bp) == CHIP_NUM_5709))
1201                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1202                                MII_BNX2_BLK_ADDR_GP_STATUS);
1203 }
1204
1205 static void
1206 bnx2_disable_bmsr1(struct bnx2 *bp)
1207 {
1208         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1209             (CHIP_NUM(bp) == CHIP_NUM_5709))
1210                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1211                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1212 }
1213
1214 static int
1215 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1216 {
1217         u32 up1;
1218         int ret = 1;
1219
1220         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1221                 return 0;
1222
1223         if (bp->autoneg & AUTONEG_SPEED)
1224                 bp->advertising |= ADVERTISED_2500baseX_Full;
1225
1226         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1227                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1228
1229         bnx2_read_phy(bp, bp->mii_up1, &up1);
1230         if (!(up1 & BCM5708S_UP1_2G5)) {
1231                 up1 |= BCM5708S_UP1_2G5;
1232                 bnx2_write_phy(bp, bp->mii_up1, up1);
1233                 ret = 0;
1234         }
1235
1236         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1237                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1238                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1239
1240         return ret;
1241 }
1242
1243 static int
1244 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1245 {
1246         u32 up1;
1247         int ret = 0;
1248
1249         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1250                 return 0;
1251
1252         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1253                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1254
1255         bnx2_read_phy(bp, bp->mii_up1, &up1);
1256         if (up1 & BCM5708S_UP1_2G5) {
1257                 up1 &= ~BCM5708S_UP1_2G5;
1258                 bnx2_write_phy(bp, bp->mii_up1, up1);
1259                 ret = 1;
1260         }
1261
1262         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1263                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1264                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1265
1266         return ret;
1267 }
1268
1269 static void
1270 bnx2_enable_forced_2g5(struct bnx2 *bp)
1271 {
1272         u32 bmcr;
1273
1274         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1275                 return;
1276
1277         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1278                 u32 val;
1279
1280                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1281                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1282                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1283                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1284                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1285                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1286
1287                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1288                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1289                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1290
1291         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1293                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1294         }
1295
1296         if (bp->autoneg & AUTONEG_SPEED) {
1297                 bmcr &= ~BMCR_ANENABLE;
1298                 if (bp->req_duplex == DUPLEX_FULL)
1299                         bmcr |= BMCR_FULLDPLX;
1300         }
1301         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1302 }
1303
1304 static void
1305 bnx2_disable_forced_2g5(struct bnx2 *bp)
1306 {
1307         u32 bmcr;
1308
1309         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1310                 return;
1311
1312         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1313                 u32 val;
1314
1315                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1316                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1317                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1318                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1319                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1323                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1324
1325         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1326                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1327                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1328         }
1329
1330         if (bp->autoneg & AUTONEG_SPEED)
1331                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1332         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1333 }
1334
1335 static void
1336 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1337 {
1338         u32 val;
1339
1340         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1341         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1342         if (start)
1343                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1344         else
1345                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1346 }
1347
/* Re-evaluate the PHY link state and reprogram the MAC accordingly.
 * Caller holds bp->phy_lock.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC or PHY loopback the link is considered up; no PHY
	 * polling is needed.
	 */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A remote-PHY-capable firmware manages the link itself. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read twice so the second
	 * read reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down link first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* The shadow AN debug register is read twice as well
		 * (latched semantics assumed -- same pattern as BMSR).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override BMSR with the EMAC link bit
		 * combined with the autoneg sync state.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode resolved speed/duplex per PHY type. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* If the link came up via parallel detect, re-enable
		 * autoneg now that the link is down again.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and update carrier on an actual state change. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1431
1432 static int
1433 bnx2_reset_phy(struct bnx2 *bp)
1434 {
1435         int i;
1436         u32 reg;
1437
1438         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1439
1440 #define PHY_RESET_MAX_WAIT 100
1441         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1442                 udelay(10);
1443
1444                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1445                 if (!(reg & BMCR_RESET)) {
1446                         udelay(20);
1447                         break;
1448                 }
1449         }
1450         if (i == PHY_RESET_MAX_WAIT) {
1451                 return -EBUSY;
1452         }
1453         return 0;
1454 }
1455
1456 static u32
1457 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1458 {
1459         u32 adv = 0;
1460
1461         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1462                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1463
1464                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1465                         adv = ADVERTISE_1000XPAUSE;
1466                 }
1467                 else {
1468                         adv = ADVERTISE_PAUSE_CAP;
1469                 }
1470         }
1471         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1472                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1473                         adv = ADVERTISE_1000XPSE_ASYM;
1474                 }
1475                 else {
1476                         adv = ADVERTISE_PAUSE_ASYM;
1477                 }
1478         }
1479         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1480                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1481                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1482                 }
1483                 else {
1484                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1485                 }
1486         }
1487         return adv;
1488 }
1489
1490 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1491
1492 static int
1493 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1494 {
1495         u32 speed_arg = 0, pause_adv;
1496
1497         pause_adv = bnx2_phy_get_pause_adv(bp);
1498
1499         if (bp->autoneg & AUTONEG_SPEED) {
1500                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1501                 if (bp->advertising & ADVERTISED_10baseT_Half)
1502                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1503                 if (bp->advertising & ADVERTISED_10baseT_Full)
1504                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1505                 if (bp->advertising & ADVERTISED_100baseT_Half)
1506                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1507                 if (bp->advertising & ADVERTISED_100baseT_Full)
1508                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1509                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1510                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1511                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1512                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1513         } else {
1514                 if (bp->req_line_speed == SPEED_2500)
1515                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1516                 else if (bp->req_line_speed == SPEED_1000)
1517                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1518                 else if (bp->req_line_speed == SPEED_100) {
1519                         if (bp->req_duplex == DUPLEX_FULL)
1520                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1521                         else
1522                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1523                 } else if (bp->req_line_speed == SPEED_10) {
1524                         if (bp->req_duplex == DUPLEX_FULL)
1525                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1526                         else
1527                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1528                 }
1529         }
1530
1531         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1532                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1533         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1534                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1535
1536         if (port == PORT_TP)
1537                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1538                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1539
1540         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1541
1542         spin_unlock_bh(&bp->phy_lock);
1543         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1544         spin_lock_bh(&bp->phy_lock);
1545
1546         return 0;
1547 }
1548
/* Program the SerDes PHY for the requested (forced or autoneg)
 * speed/duplex/flow-control.  Caller holds bp->phy_lock, which is
 * temporarily dropped around the sleeping msleep/fw-sync sections.
 * Always returns 0 (or the result of bnx2_setup_remote_phy()).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed (remote) PHY: forward the request. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G capability bit requires bouncing
		 * the link so the partner sees the change.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): clears bit 0x2000 in BMCR
				 * when dropping back to forced 1G; verify
				 * the bit's meaning against 5709 docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve and program
			 * the MAC with the current settings.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiated path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1663
1664 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1665         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1666                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1667                 (ADVERTISED_1000baseT_Full)
1668
1669 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1670         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1671         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1672         ADVERTISED_1000baseT_Full)
1673
1674 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1675         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1676
1677 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1678
1679 static void
1680 bnx2_set_default_remote_link(struct bnx2 *bp)
1681 {
1682         u32 link;
1683
1684         if (bp->phy_port == PORT_TP)
1685                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1686         else
1687                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1688
1689         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1690                 bp->req_line_speed = 0;
1691                 bp->autoneg |= AUTONEG_SPEED;
1692                 bp->advertising = ADVERTISED_Autoneg;
1693                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1694                         bp->advertising |= ADVERTISED_10baseT_Half;
1695                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1696                         bp->advertising |= ADVERTISED_10baseT_Full;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1698                         bp->advertising |= ADVERTISED_100baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1700                         bp->advertising |= ADVERTISED_100baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1702                         bp->advertising |= ADVERTISED_1000baseT_Full;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1704                         bp->advertising |= ADVERTISED_2500baseX_Full;
1705         } else {
1706                 bp->autoneg = 0;
1707                 bp->advertising = 0;
1708                 bp->req_duplex = DUPLEX_FULL;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1710                         bp->req_line_speed = SPEED_10;
1711                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1712                                 bp->req_duplex = DUPLEX_HALF;
1713                 }
1714                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1715                         bp->req_line_speed = SPEED_100;
1716                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1717                                 bp->req_duplex = DUPLEX_HALF;
1718                 }
1719                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1720                         bp->req_line_speed = SPEED_1000;
1721                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1722                         bp->req_line_speed = SPEED_2500;
1723         }
1724 }
1725
1726 static void
1727 bnx2_set_default_link(struct bnx2 *bp)
1728 {
1729         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1730                 bnx2_set_default_remote_link(bp);
1731                 return;
1732         }
1733
1734         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1735         bp->req_line_speed = 0;
1736         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1737                 u32 reg;
1738
1739                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1740
1741                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1742                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1743                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1744                         bp->autoneg = 0;
1745                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1746                         bp->req_duplex = DUPLEX_FULL;
1747                 }
1748         } else
1749                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1750 }
1751
1752 static void
1753 bnx2_send_heart_beat(struct bnx2 *bp)
1754 {
1755         u32 msg;
1756         u32 addr;
1757
1758         spin_lock(&bp->indirect_lock);
1759         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1760         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1761         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1762         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1763         spin_unlock(&bp->indirect_lock);
1764 }
1765
1766 static void
1767 bnx2_remote_phy_event(struct bnx2 *bp)
1768 {
1769         u32 msg;
1770         u8 link_up = bp->link_up;
1771         u8 old_port;
1772
1773         msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1774
1775         if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1776                 bnx2_send_heart_beat(bp);
1777
1778         msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1779
1780         if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1781                 bp->link_up = 0;
1782         else {
1783                 u32 speed;
1784
1785                 bp->link_up = 1;
1786                 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1787                 bp->duplex = DUPLEX_FULL;
1788                 switch (speed) {
1789                         case BNX2_LINK_STATUS_10HALF:
1790                                 bp->duplex = DUPLEX_HALF;
1791                         case BNX2_LINK_STATUS_10FULL:
1792                                 bp->line_speed = SPEED_10;
1793                                 break;
1794                         case BNX2_LINK_STATUS_100HALF:
1795                                 bp->duplex = DUPLEX_HALF;
1796                         case BNX2_LINK_STATUS_100BASE_T4:
1797                         case BNX2_LINK_STATUS_100FULL:
1798                                 bp->line_speed = SPEED_100;
1799                                 break;
1800                         case BNX2_LINK_STATUS_1000HALF:
1801                                 bp->duplex = DUPLEX_HALF;
1802                         case BNX2_LINK_STATUS_1000FULL:
1803                                 bp->line_speed = SPEED_1000;
1804                                 break;
1805                         case BNX2_LINK_STATUS_2500HALF:
1806                                 bp->duplex = DUPLEX_HALF;
1807                         case BNX2_LINK_STATUS_2500FULL:
1808                                 bp->line_speed = SPEED_2500;
1809                                 break;
1810                         default:
1811                                 bp->line_speed = 0;
1812                                 break;
1813                 }
1814
1815                 bp->flow_ctrl = 0;
1816                 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1817                     (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1818                         if (bp->duplex == DUPLEX_FULL)
1819                                 bp->flow_ctrl = bp->req_flow_ctrl;
1820                 } else {
1821                         if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1822                                 bp->flow_ctrl |= FLOW_CTRL_TX;
1823                         if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1824                                 bp->flow_ctrl |= FLOW_CTRL_RX;
1825                 }
1826
1827                 old_port = bp->phy_port;
1828                 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1829                         bp->phy_port = PORT_FIBRE;
1830                 else
1831                         bp->phy_port = PORT_TP;
1832
1833                 if (old_port != bp->phy_port)
1834                         bnx2_set_default_link(bp);
1835
1836         }
1837         if (bp->link_up != link_up)
1838                 bnx2_report_link(bp);
1839
1840         bnx2_set_mac_link(bp);
1841 }
1842
1843 static int
1844 bnx2_set_remote_link(struct bnx2 *bp)
1845 {
1846         u32 evt_code;
1847
1848         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1849         switch (evt_code) {
1850                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1851                         bnx2_remote_phy_event(bp);
1852                         break;
1853                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1854                 default:
1855                         bnx2_send_heart_beat(bp);
1856                         break;
1857         }
1858         return 0;
1859 }
1860
1861 static int
1862 bnx2_setup_copper_phy(struct bnx2 *bp)
1863 {
1864         u32 bmcr;
1865         u32 new_bmcr;
1866
1867         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1868
1869         if (bp->autoneg & AUTONEG_SPEED) {
1870                 u32 adv_reg, adv1000_reg;
1871                 u32 new_adv_reg = 0;
1872                 u32 new_adv1000_reg = 0;
1873
1874                 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1875                 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1876                         ADVERTISE_PAUSE_ASYM);
1877
1878                 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1879                 adv1000_reg &= PHY_ALL_1000_SPEED;
1880
1881                 if (bp->advertising & ADVERTISED_10baseT_Half)
1882                         new_adv_reg |= ADVERTISE_10HALF;
1883                 if (bp->advertising & ADVERTISED_10baseT_Full)
1884                         new_adv_reg |= ADVERTISE_10FULL;
1885                 if (bp->advertising & ADVERTISED_100baseT_Half)
1886                         new_adv_reg |= ADVERTISE_100HALF;
1887                 if (bp->advertising & ADVERTISED_100baseT_Full)
1888                         new_adv_reg |= ADVERTISE_100FULL;
1889                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1890                         new_adv1000_reg |= ADVERTISE_1000FULL;
1891
1892                 new_adv_reg |= ADVERTISE_CSMA;
1893
1894                 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1895
1896                 if ((adv1000_reg != new_adv1000_reg) ||
1897                         (adv_reg != new_adv_reg) ||
1898                         ((bmcr & BMCR_ANENABLE) == 0)) {
1899
1900                         bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1901                         bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1902                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1903                                 BMCR_ANENABLE);
1904                 }
1905                 else if (bp->link_up) {
1906                         /* Flow ctrl may have changed from auto to forced */
1907                         /* or vice-versa. */
1908
1909                         bnx2_resolve_flow_ctrl(bp);
1910                         bnx2_set_mac_link(bp);
1911                 }
1912                 return 0;
1913         }
1914
1915         new_bmcr = 0;
1916         if (bp->req_line_speed == SPEED_100) {
1917                 new_bmcr |= BMCR_SPEED100;
1918         }
1919         if (bp->req_duplex == DUPLEX_FULL) {
1920                 new_bmcr |= BMCR_FULLDPLX;
1921         }
1922         if (new_bmcr != bmcr) {
1923                 u32 bmsr;
1924
1925                 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1926                 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1927
1928                 if (bmsr & BMSR_LSTATUS) {
1929                         /* Force link down */
1930                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1931                         spin_unlock_bh(&bp->phy_lock);
1932                         msleep(50);
1933                         spin_lock_bh(&bp->phy_lock);
1934
1935                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1936                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1937                 }
1938
1939                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1940
1941                 /* Normally, the new speed is setup after the link has
1942                  * gone down and up again. In some cases, link will not go
1943                  * down so we need to set up the new speed here.
1944                  */
1945                 if (bmsr & BMSR_LSTATUS) {
1946                         bp->line_speed = bp->req_line_speed;
1947                         bp->duplex = bp->req_duplex;
1948                         bnx2_resolve_flow_ctrl(bp);
1949                         bnx2_set_mac_link(bp);
1950                 }
1951         } else {
1952                 bnx2_resolve_flow_ctrl(bp);
1953                 bnx2_set_mac_link(bp);
1954         }
1955         return 0;
1956 }
1957
1958 static int
1959 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1960 {
1961         if (bp->loopback == MAC_LOOPBACK)
1962                 return 0;
1963
1964         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1965                 return (bnx2_setup_serdes_phy(bp, port));
1966         }
1967         else {
1968                 return (bnx2_setup_copper_phy(bp));
1969         }
1970 }
1971
1972 static int
1973 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1974 {
1975         u32 val;
1976
1977         bp->mii_bmcr = MII_BMCR + 0x10;
1978         bp->mii_bmsr = MII_BMSR + 0x10;
1979         bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1980         bp->mii_adv = MII_ADVERTISE + 0x10;
1981         bp->mii_lpa = MII_LPA + 0x10;
1982         bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1983
1984         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1985         bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1986
1987         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1988         if (reset_phy)
1989                 bnx2_reset_phy(bp);
1990
1991         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1992
1993         bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1994         val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1995         val |= MII_BNX2_SD_1000XCTL1_FIBER;
1996         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1997
1998         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1999         bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2000         if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2001                 val |= BCM5708S_UP1_2G5;
2002         else
2003                 val &= ~BCM5708S_UP1_2G5;
2004         bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2005
2006         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2007         bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2008         val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2009         bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2010
2011         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2012
2013         val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2014               MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2015         bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2016
2017         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2018
2019         return 0;
2020 }
2021
2022 static int
2023 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2024 {
2025         u32 val;
2026
2027         if (reset_phy)
2028                 bnx2_reset_phy(bp);
2029
2030         bp->mii_up1 = BCM5708S_UP1;
2031
2032         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2033         bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2034         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2035
2036         bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2037         val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2038         bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2039
2040         bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2041         val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2042         bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2043
2044         if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2045                 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2046                 val |= BCM5708S_UP1_2G5;
2047                 bnx2_write_phy(bp, BCM5708S_UP1, val);
2048         }
2049
2050         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2051             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2052             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2053                 /* increase tx signal amplitude */
2054                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2055                                BCM5708S_BLK_ADDR_TX_MISC);
2056                 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2057                 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2058                 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2059                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2060         }
2061
2062         val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2063               BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2064
2065         if (val) {
2066                 u32 is_backplane;
2067
2068                 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2069                 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2070                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2071                                        BCM5708S_BLK_ADDR_TX_MISC);
2072                         bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2073                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2074                                        BCM5708S_BLK_ADDR_DIG);
2075                 }
2076         }
2077         return 0;
2078 }
2079
2080 static int
2081 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2082 {
2083         if (reset_phy)
2084                 bnx2_reset_phy(bp);
2085
2086         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2087
2088         if (CHIP_NUM(bp) == CHIP_NUM_5706)
2089                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2090
2091         if (bp->dev->mtu > 1500) {
2092                 u32 val;
2093
2094                 /* Set extended packet length bit */
2095                 bnx2_write_phy(bp, 0x18, 0x7);
2096                 bnx2_read_phy(bp, 0x18, &val);
2097                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2098
2099                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2100                 bnx2_read_phy(bp, 0x1c, &val);
2101                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2102         }
2103         else {
2104                 u32 val;
2105
2106                 bnx2_write_phy(bp, 0x18, 0x7);
2107                 bnx2_read_phy(bp, 0x18, &val);
2108                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2109
2110                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2111                 bnx2_read_phy(bp, 0x1c, &val);
2112                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2113         }
2114
2115         return 0;
2116 }
2117
2118 static int
2119 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2120 {
2121         u32 val;
2122
2123         if (reset_phy)
2124                 bnx2_reset_phy(bp);
2125
2126         if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2127                 bnx2_write_phy(bp, 0x18, 0x0c00);
2128                 bnx2_write_phy(bp, 0x17, 0x000a);
2129                 bnx2_write_phy(bp, 0x15, 0x310b);
2130                 bnx2_write_phy(bp, 0x17, 0x201f);
2131                 bnx2_write_phy(bp, 0x15, 0x9506);
2132                 bnx2_write_phy(bp, 0x17, 0x401f);
2133                 bnx2_write_phy(bp, 0x15, 0x14e2);
2134                 bnx2_write_phy(bp, 0x18, 0x0400);
2135         }
2136
2137         if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2138                 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2139                                MII_BNX2_DSP_EXPAND_REG | 0x8);
2140                 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2141                 val &= ~(1 << 8);
2142                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2143         }
2144
2145         if (bp->dev->mtu > 1500) {
2146                 /* Set extended packet length bit */
2147                 bnx2_write_phy(bp, 0x18, 0x7);
2148                 bnx2_read_phy(bp, 0x18, &val);
2149                 bnx2_write_phy(bp, 0x18, val | 0x4000);
2150
2151                 bnx2_read_phy(bp, 0x10, &val);
2152                 bnx2_write_phy(bp, 0x10, val | 0x1);
2153         }
2154         else {
2155                 bnx2_write_phy(bp, 0x18, 0x7);
2156                 bnx2_read_phy(bp, 0x18, &val);
2157                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2158
2159                 bnx2_read_phy(bp, 0x10, &val);
2160                 bnx2_write_phy(bp, 0x10, val & ~0x1);
2161         }
2162
2163         /* ethernet@wirespeed */
2164         bnx2_write_phy(bp, 0x18, 0x7007);
2165         bnx2_read_phy(bp, 0x18, &val);
2166         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2167         return 0;
2168 }
2169
2170
2171 static int
2172 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2173 {
2174         u32 val;
2175         int rc = 0;
2176
2177         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2178         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2179
2180         bp->mii_bmcr = MII_BMCR;
2181         bp->mii_bmsr = MII_BMSR;
2182         bp->mii_bmsr1 = MII_BMSR;
2183         bp->mii_adv = MII_ADVERTISE;
2184         bp->mii_lpa = MII_LPA;
2185
2186         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2187
2188         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2189                 goto setup_phy;
2190
2191         bnx2_read_phy(bp, MII_PHYSID1, &val);
2192         bp->phy_id = val << 16;
2193         bnx2_read_phy(bp, MII_PHYSID2, &val);
2194         bp->phy_id |= val & 0xffff;
2195
2196         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2197                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2198                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2199                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2200                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2201                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2202                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2203         }
2204         else {
2205                 rc = bnx2_init_copper_phy(bp, reset_phy);
2206         }
2207
2208 setup_phy:
2209         if (!rc)
2210                 rc = bnx2_setup_phy(bp, bp->phy_port);
2211
2212         return rc;
2213 }
2214
2215 static int
2216 bnx2_set_mac_loopback(struct bnx2 *bp)
2217 {
2218         u32 mac_mode;
2219
2220         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2221         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2222         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2223         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2224         bp->link_up = 1;
2225         return 0;
2226 }
2227
2228 static int bnx2_test_link(struct bnx2 *);
2229
2230 static int
2231 bnx2_set_phy_loopback(struct bnx2 *bp)
2232 {
2233         u32 mac_mode;
2234         int rc, i;
2235
2236         spin_lock_bh(&bp->phy_lock);
2237         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2238                             BMCR_SPEED1000);
2239         spin_unlock_bh(&bp->phy_lock);
2240         if (rc)
2241                 return rc;
2242
2243         for (i = 0; i < 10; i++) {
2244                 if (bnx2_test_link(bp) == 0)
2245                         break;
2246                 msleep(100);
2247         }
2248
2249         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2250         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2251                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2252                       BNX2_EMAC_MODE_25G_MODE);
2253
2254         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2255         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2256         bp->link_up = 1;
2257         return 0;
2258 }
2259
2260 static int
2261 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2262 {
2263         int i;
2264         u32 val;
2265
2266         bp->fw_wr_seq++;
2267         msg_data |= bp->fw_wr_seq;
2268
2269         bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2270
2271         /* wait for an acknowledgement. */
2272         for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2273                 msleep(10);
2274
2275                 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2276
2277                 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2278                         break;
2279         }
2280         if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2281                 return 0;
2282
2283         /* If we timed out, inform the firmware that this is the case. */
2284         if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2285                 if (!silent)
2286                         printk(KERN_ERR PFX "fw sync timeout, reset code = "
2287                                             "%x\n", msg_data);
2288
2289                 msg_data &= ~BNX2_DRV_MSG_CODE;
2290                 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2291
2292                 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2293
2294                 return -EBUSY;
2295         }
2296
2297         if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2298                 return -EIO;
2299
2300         return 0;
2301 }
2302
2303 static int
2304 bnx2_init_5709_context(struct bnx2 *bp)
2305 {
2306         int i, ret = 0;
2307         u32 val;
2308
2309         val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2310         val |= (BCM_PAGE_BITS - 8) << 16;
2311         REG_WR(bp, BNX2_CTX_COMMAND, val);
2312         for (i = 0; i < 10; i++) {
2313                 val = REG_RD(bp, BNX2_CTX_COMMAND);
2314                 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2315                         break;
2316                 udelay(2);
2317         }
2318         if (val & BNX2_CTX_COMMAND_MEM_INIT)
2319                 return -EBUSY;
2320
2321         for (i = 0; i < bp->ctx_pages; i++) {
2322                 int j;
2323
2324                 if (bp->ctx_blk[i])
2325                         memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2326                 else
2327                         return -ENOMEM;
2328
2329                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2330                        (bp->ctx_blk_mapping[i] & 0xffffffff) |
2331                        BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2332                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2333                        (u64) bp->ctx_blk_mapping[i] >> 32);
2334                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2335                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2336                 for (j = 0; j < 10; j++) {
2337
2338                         val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2339                         if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2340                                 break;
2341                         udelay(5);
2342                 }
2343                 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2344                         ret = -EBUSY;
2345                         break;
2346                 }
2347         }
2348         return ret;
2349 }
2350
2351 static void
2352 bnx2_init_context(struct bnx2 *bp)
2353 {
2354         u32 vcid;
2355
2356         vcid = 96;
2357         while (vcid) {
2358                 u32 vcid_addr, pcid_addr, offset;
2359                 int i;
2360
2361                 vcid--;
2362
2363                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2364                         u32 new_vcid;
2365
2366                         vcid_addr = GET_PCID_ADDR(vcid);
2367                         if (vcid & 0x8) {
2368                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2369                         }
2370                         else {
2371                                 new_vcid = vcid;
2372                         }
2373                         pcid_addr = GET_PCID_ADDR(new_vcid);
2374                 }
2375                 else {
2376                         vcid_addr = GET_CID_ADDR(vcid);
2377                         pcid_addr = vcid_addr;
2378                 }
2379
2380                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2381                         vcid_addr += (i << PHY_CTX_SHIFT);
2382                         pcid_addr += (i << PHY_CTX_SHIFT);
2383
2384                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2385                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2386
2387                         /* Zero out the context. */
2388                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2389                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2390                 }
2391         }
2392 }
2393
2394 static int
2395 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2396 {
2397         u16 *good_mbuf;
2398         u32 good_mbuf_cnt;
2399         u32 val;
2400
2401         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2402         if (good_mbuf == NULL) {
2403                 printk(KERN_ERR PFX "Failed to allocate memory in "
2404                                     "bnx2_alloc_bad_rbuf\n");
2405                 return -ENOMEM;
2406         }
2407
2408         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2409                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2410
2411         good_mbuf_cnt = 0;
2412
2413         /* Allocate a bunch of mbufs and save the good ones in an array. */
2414         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2415         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2416                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2417                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2418
2419                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2420
2421                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2422
2423                 /* The addresses with Bit 9 set are bad memory blocks. */
2424                 if (!(val & (1 << 9))) {
2425                         good_mbuf[good_mbuf_cnt] = (u16) val;
2426                         good_mbuf_cnt++;
2427                 }
2428
2429                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2430         }
2431
2432         /* Free the good ones back to the mbuf pool thus discarding
2433          * all the bad ones. */
2434         while (good_mbuf_cnt) {
2435                 good_mbuf_cnt--;
2436
2437                 val = good_mbuf[good_mbuf_cnt];
2438                 val = (val << 9) | val | 1;
2439
2440                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2441         }
2442         kfree(good_mbuf);
2443         return 0;
2444 }
2445
2446 static void
2447 bnx2_set_mac_addr(struct bnx2 *bp)
2448 {
2449         u32 val;
2450         u8 *mac_addr = bp->dev->dev_addr;
2451
2452         val = (mac_addr[0] << 8) | mac_addr[1];
2453
2454         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2455
2456         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2457                 (mac_addr[4] << 8) | mac_addr[5];
2458
2459         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2460 }
2461
/* Allocate and DMA-map a fresh page for slot @index of the RX page ring,
 * and publish its bus address in the corresponding rx_bd.
 * Returns 0 on success or -ENOMEM if no page could be allocated.
 *
 * NOTE(review): the pci_map_page() result is not checked for a mapping
 * error -- presumably acceptable on the platforms this driver targets;
 * confirm against the DMA-API requirements.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        dma_addr_t mapping;
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct rx_bd *rxbd =
                &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;
        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        /* Remember the page and its mapping for later unmap/free. */
        rx_pg->page = page;
        pci_unmap_addr_set(rx_pg, mapping, mapping);
        /* Hand the 64-bit DMA address to the chip via the descriptor. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
}
2481
2482 static void
2483 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2484 {
2485         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2486         struct page *page = rx_pg->page;
2487
2488         if (!page)
2489                 return;
2490
2491         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2492                        PCI_DMA_FROMDEVICE);
2493
2494         __free_page(page);
2495         rx_pg->page = NULL;
2496 }
2497
/* Allocate a new skb for slot @index of the normal RX ring, align and
 * DMA-map its data buffer, and point the matching rx_bd at it.  Also
 * advances rxr->rx_prod_bseq by the buffer size as required by the
 * chip's byte-sequence accounting.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data to a BNX2_RX_ALIGN boundary for the hardware. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        /* NOTE(review): mapping failure is not checked here -- presumably
         * acceptable on the platforms this driver targets; verify.
         */
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* Publish the 64-bit DMA address in the buffer descriptor. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
2528
2529 static int
2530 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2531 {
2532         struct status_block *sblk = bnapi->status_blk.msi;
2533         u32 new_link_state, old_link_state;
2534         int is_set = 1;
2535
2536         new_link_state = sblk->status_attn_bits & event;
2537         old_link_state = sblk->status_attn_bits_ack & event;
2538         if (new_link_state != old_link_state) {
2539                 if (new_link_state)
2540                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2541                 else
2542                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2543         } else
2544                 is_set = 0;
2545
2546         return is_set;
2547 }
2548
/* Service PHY-related attention events from the NAPI poll path.
 * Takes bp->phy_lock (non-BH variant: we are already in softirq
 * context) around the link state updates.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        /* Physical link changed state (up/down). */
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        /* Firmware signalled a remote-PHY link event via timer abort. */
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2562
2563 static inline u16
2564 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2565 {
2566         u16 cons;
2567
2568         /* Tell compiler that status block fields can change. */
2569         barrier();
2570         cons = *bnapi->hw_tx_cons_ptr;
2571         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2572                 cons++;
2573         return cons;
2574 }
2575
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the driver's consumer index up to the
 * hardware consumer index in the status block, unmapping and freeing
 * each completed skb.  At most @budget packets are reclaimed per call.
 * Wakes the netdev queue if enough ring space was freed.
 * Returns the number of packets freed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0;

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        /* Account for the link BD at the page boundary. */
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Stop if the packet's last BD has not completed
                         * yet (signed 16-bit ring-distance comparison).
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each page fragment's BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Pick up completions that arrived while we worked. */
                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                /* Re-check under the TX lock to close the race with the
                 * transmit path stopping the queue concurrently.
                 */
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
        return tx_pkt;
}
2658
/* Recycle @count page buffers from the consumer side of the RX page
 * ring back to the producer side (used on error/alloc-failure paths so
 * no new pages need to be allocated).  If @skb is non-NULL, its last
 * page fragment is first re-mapped and returned to the consumer slot,
 * and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = rxr->rx_pg_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        /* Detach the skb's last fragment page and give it
                         * back to the ring, then drop the skb itself.
                         */
                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        /* Move page, mapping and DMA address from the
                         * consumer slot to the producer slot.
                         */
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2708
/* Recycle the RX buffer at ring slot @cons to producer slot @prod
 * without allocating a new skb (error/copy paths).  Syncs the header
 * area back to the device and, when the slots differ, moves the DMA
 * mapping and descriptor address from cons to prod.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Only the header area was synced for the CPU; hand it back. */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: descriptor and mapping are already correct. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2738
/* Finish building a received skb.
 *
 * @len is the packet length from the frame header, @hdr_len is non-zero
 * when the packet was split between the normal buffer and page-ring
 * buffers, @dma_addr is the buffer's mapping, and @ring_idx packs the
 * consumer index (high 16 bits) and producer index (low 16 bits).
 *
 * First replenishes the ring slot; on failure the buffer (and any
 * pages) are recycled and an error is returned.  For split packets the
 * remainder is attached as page fragments, each consumed page being
 * replaced by a freshly allocated one.  The trailing 4-byte CRC is
 * trimmed from the last fragment.  Returns 0 on success.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* raw_len includes the 4-byte CRC still on the wire
                         * data; recycle every page the packet occupied.
                         */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Whole packet fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* Only (part of) the CRC is left: trim it
                                 * from whatever holds the packet tail and
                                 * recycle the remaining pages unused.
                                 */
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* The last fragment carries the 4-byte CRC; drop it. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                /* Give the pages back (including the ones
                                 * already attached to the skb) and bail.
                                 */
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
2831
2832 static inline u16
2833 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2834 {
2835         u16 cons;
2836
2837         /* Tell compiler that status block fields can change. */
2838         barrier();
2839         cons = *bnapi->hw_rx_cons_ptr;
2840         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2841                 cons++;
2842         return cons;
2843 }
2844
2845 static int
2846 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2847 {
2848         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2849         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2850         struct l2_fhdr *rx_hdr;
2851         int rx_pkt = 0, pg_ring_used = 0;
2852
2853         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2854         sw_cons = rxr->rx_cons;
2855         sw_prod = rxr->rx_prod;
2856
2857         /* Memory barrier necessary as speculative reads of the rx
2858          * buffer can be ahead of the index in the status block
2859          */
2860         rmb();
2861         while (sw_cons != hw_cons) {
2862                 unsigned int len, hdr_len;
2863                 u32 status;
2864                 struct sw_bd *rx_buf;
2865                 struct sk_buff *skb;
2866                 dma_addr_t dma_addr;
2867
2868                 sw_ring_cons = RX_RING_IDX(sw_cons);
2869                 sw_ring_prod = RX_RING_IDX(sw_prod);
2870
2871                 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2872                 skb = rx_buf->skb;
2873
2874                 rx_buf->skb = NULL;
2875
2876                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2877
2878                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2879                         BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2880                         PCI_DMA_FROMDEVICE);
2881
2882                 rx_hdr = (struct l2_fhdr *) skb->data;
2883                 len = rx_hdr->l2_fhdr_pkt_len;
2884
2885                 if ((status = rx_hdr->l2_fhdr_status) &
2886                         (L2_FHDR_ERRORS_BAD_CRC |
2887                         L2_FHDR_ERRORS_PHY_DECODE |
2888                         L2_FHDR_ERRORS_ALIGNMENT |
2889                         L2_FHDR_ERRORS_TOO_SHORT |
2890                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2891
2892                         bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2893                                           sw_ring_prod);
2894                         goto next_rx;
2895                 }
2896                 hdr_len = 0;
2897                 if (status & L2_FHDR_STATUS_SPLIT) {
2898                         hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2899                         pg_ring_used = 1;
2900                 } else if (len > bp->rx_jumbo_thresh) {
2901                         hdr_len = bp->rx_jumbo_thresh;
2902                         pg_ring_used = 1;
2903                 }
2904
2905                 len -= 4;
2906
2907                 if (len <= bp->rx_copy_thresh) {
2908                         struct sk_buff *new_skb;
2909
2910                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2911                         if (new_skb == NULL) {
2912                                 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2913                                                   sw_ring_prod);
2914                                 goto next_rx;
2915                         }
2916
2917                         /* aligned copy */
2918                         skb_copy_from_linear_data_offset(skb,
2919                                                          BNX2_RX_OFFSET - 2,
2920                                       new_skb->data, len + 2);
2921                         skb_reserve(new_skb, 2);
2922                         skb_put(new_skb, len);
2923
2924                         bnx2_reuse_rx_skb(bp, rxr, skb,
2925                                 sw_ring_cons, sw_ring_prod);
2926
2927                         skb = new_skb;
2928                 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2929                            dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2930                         goto next_rx;
2931
2932                 skb->protocol = eth_type_trans(skb, bp->dev);
2933
2934                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2935                         (ntohs(skb->protocol) != 0x8100)) {
2936
2937                         dev_kfree_skb(skb);
2938                         goto next_rx;
2939
2940                 }
2941
2942                 skb->ip_summed = CHECKSUM_NONE;
2943                 if (bp->rx_csum &&
2944                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2945                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2946
2947                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2948                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2949                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2950                 }
2951
2952 #ifdef BCM_VLAN
2953                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2954                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2955                                 rx_hdr->l2_fhdr_vlan_tag);
2956                 }
2957                 else
2958 #endif
2959                         netif_receive_skb(skb);
2960
2961                 bp->dev->last_rx = jiffies;
2962                 rx_pkt++;
2963
2964 next_rx:
2965                 sw_cons = NEXT_RX_BD(sw_cons);
2966                 sw_prod = NEXT_RX_BD(sw_prod);
2967
2968                 if ((rx_pkt == budget))
2969                         break;
2970
2971                 /* Refresh hw_cons to see if there is new work */
2972                 if (sw_cons == hw_cons) {
2973                         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2974                         rmb();
2975                 }
2976         }
2977         rxr->rx_cons = sw_cons;
2978         rxr->rx_prod = sw_prod;
2979
2980         if (pg_ring_used)
2981                 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2982
2983         REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2984
2985         REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
2986
2987         mmiowb();
2988
2989         return rx_pkt;
2990
2991 }
2992
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct net_device *dev = bp->dev;

        prefetch(bnapi->status_blk.msi);
        /* Mask further interrupts; the NAPI poll re-enables them when done. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bnapi->napi);

        return IRQ_HANDLED;
}
3016
3017 static irqreturn_t
3018 bnx2_msi_1shot(int irq, void *dev_instance)
3019 {
3020         struct bnx2_napi *bnapi = dev_instance;
3021         struct bnx2 *bp = bnapi->bp;
3022         struct net_device *dev = bp->dev;
3023
3024         prefetch(bnapi->status_blk.msi);
3025
3026         /* Return here if interrupt is disabled. */
3027         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3028                 return IRQ_HANDLED;
3029
3030         netif_rx_schedule(dev, &bnapi->napi);
3031
3032         return IRQ_HANDLED;
3033 }
3034
/* INTx ISR.  May be shared, so it must detect whether this device
 * actually raised the interrupt before claiming it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct net_device *dev = bp->dev;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Mask further interrupts; the NAPI poll re-enables them. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                /* Record the index we are handling before polling starts. */
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
3074
3075 static inline int
3076 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3077 {
3078         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3079         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3080
3081         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3082             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3083                 return 1;
3084         return 0;
3085 }
3086
3087 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3088                                  STATUS_ATTN_BITS_TIMER_ABORT)
3089
3090 static inline int
3091 bnx2_has_work(struct bnx2_napi *bnapi)
3092 {
3093         struct status_block *sblk = bnapi->status_blk.msi;
3094
3095         if (bnx2_has_fast_work(bnapi))
3096                 return 1;
3097
3098         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3099             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3100                 return 1;
3101
3102         return 0;
3103 }
3104
/* Check for and service pending link attention events from the NAPI
 * poll loop.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3124
3125 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3126                           int work_done, int budget)
3127 {
3128         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3129         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3130
3131         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3132                 bnx2_tx_int(bp, bnapi, 0);
3133
3134         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3135                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3136
3137         return work_done;
3138 }
3139
/* NAPI poll handler for MSI-X vectors.  Unlike bnx2_poll(), no link
 * handling is done here; that is reserved for the base vector.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        netif_rx_complete(bp->dev, napi);
                        /* Ack up to last_status_idx and re-enable this
                         * vector's interrupt.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3166
/* NAPI poll handler for the INTx/MSI (single-vector) case.  Handles
 * link attention events as well as the TX/RX fast path.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: a single ack/re-enable write. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: write twice -- first with the interrupt
                         * still masked, then unmasked -- to avoid losing
                         * or duplicating a level-triggered interrupt.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3210
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN tag keeping) and the
 * RPM sort/multicast hash registers from dev->flags and dev->mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Strip VLAN tags in hardware only when acceleration is active
         * and the firmware (ASF) does not need to see the tags.
         */
        if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash on the low byte of the little-endian CRC of
                         * the address: 3 bits select the register, 5 bits
                         * select the bit within it.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, reprogram, then re-enable the sort user0 filter. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3285
/* Download a firmware image into one of the two RV2P (receive V2
 * processing) engines.
 *
 * @rv2p_code:     decompressed image, stored as little-endian 32-bit words
 * @rv2p_code_len: image length in bytes
 * @rv2p_proc:     RV2P_PROC1 or RV2P_PROC2, selecting the target engine
 *
 * The engine is left in reset; it is un-stalled later by other init code.
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	/* On 5709 (Xinan), patch the PROC2 image's BD page size field
	 * in place so it matches the page size used by this driver.
	 */
	if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
		val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
		val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
		rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
	}

	/* Each instruction is 64 bits wide: stage the high and low
	 * halves, then commit them to instruction slot i/8 with a
	 * read/write command.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3324
/* Download firmware into one of the on-chip RISC processors.
 *
 * The CPU is halted, the firmware's text/data/sbss/bss/rodata sections
 * are written into the processor's scratchpad through indirect register
 * access, the program counter is set to the entry point, and the CPU is
 * restarted.
 *
 * @cpu_reg: register offsets and constants describing the target CPU
 * @fw:      firmware image; fw->text is a scratch buffer the (gzipped)
 *           text section is decompressed into
 *
 * Returns 0 on success or a negative errno if decompression fails.
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	/* Translate the MIPS-view section address into a scratchpad
	 * offset (same translation for every section below).
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Text is stored compressed; inflate it into fw->text. */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	/* Point the program counter at the firmware entry point. */
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3406
/* Load firmware into every on-chip processor: the two RV2P engines
 * plus the RX, TX, TX patch-up, completion, and command RISC CPUs.
 * The 5709 (Xinan) firmware images are selected on that chip;
 * otherwise the 5706/5708 images are used.  A single vmalloc'd
 * scratch buffer is reused to decompress each image in turn.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or a negative errno from decompression or CPU load.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	/* zlib_inflate_blob() returns the decompressed length on success. */
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* Hand the scratch buffer to load_cpu_fw() for text inflation. */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_com, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_cp, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3501
/* Transition the chip between PCI power states.
 *
 * PCI_D0: clear the PM state bits and PME status, wait out the D3hot
 * exit delay if we were in D3hot, and undo the magic-packet/ACPI wake
 * configuration in the EMAC and RPM blocks.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, reconfigure the PHY/MAC to
 * receive wake packets (forcing 10/100 autoneg on copper ports),
 * enable broadcast/multicast sorting, notify firmware of the suspend
 * type, then write the D3hot state into PMCSR.  No register access is
 * allowed after that write until the device is brought back to D0.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and write 1 to clear any
		 * latched PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear any received magic/ACPI packet indication and
		 * disable magic-packet mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link stays up at a WoL-capable speed; the
			 * original settings are restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program and enable sort user 0 for broadcast
			 * and multicast wake packets.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode which kind of suspend this is. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the D3hot state bits (3)
		 * are only written when WOL is enabled -- presumably an
		 * early-revision erratum; confirm against chip errata.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3638
3639 static int
3640 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3641 {
3642         u32 val;
3643         int j;
3644
3645         /* Request access to the flash interface. */
3646         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3647         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3648                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3649                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3650                         break;
3651
3652                 udelay(5);
3653         }
3654
3655         if (j >= NVRAM_TIMEOUT_COUNT)
3656                 return -EBUSY;
3657
3658         return 0;
3659 }
3660
3661 static int
3662 bnx2_release_nvram_lock(struct bnx2 *bp)
3663 {
3664         int j;
3665         u32 val;
3666
3667         /* Relinquish nvram interface. */
3668         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3669
3670         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3671                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3672                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3673                         break;
3674
3675                 udelay(5);
3676         }
3677
3678         if (j >= NVRAM_TIMEOUT_COUNT)
3679                 return -EBUSY;
3680
3681         return 0;
3682 }
3683
3684
3685 static int
3686 bnx2_enable_nvram_write(struct bnx2 *bp)
3687 {
3688         u32 val;
3689
3690         val = REG_RD(bp, BNX2_MISC_CFG);
3691         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3692
3693         if (bp->flash_info->flags & BNX2_NV_WREN) {
3694                 int j;
3695
3696                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3697                 REG_WR(bp, BNX2_NVM_COMMAND,
3698                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3699
3700                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3701                         udelay(5);
3702
3703                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3704                         if (val & BNX2_NVM_COMMAND_DONE)
3705                                 break;
3706                 }
3707
3708                 if (j >= NVRAM_TIMEOUT_COUNT)
3709                         return -EBUSY;
3710         }
3711         return 0;
3712 }
3713
3714 static void
3715 bnx2_disable_nvram_write(struct bnx2 *bp)
3716 {
3717         u32 val;
3718
3719         val = REG_RD(bp, BNX2_MISC_CFG);
3720         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3721 }
3722
3723
3724 static void
3725 bnx2_enable_nvram_access(struct bnx2 *bp)
3726 {
3727         u32 val;
3728
3729         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3730         /* Enable both bits, even on read. */
3731         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3732                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3733 }
3734
3735 static void
3736 bnx2_disable_nvram_access(struct bnx2 *bp)
3737 {
3738         u32 val;
3739
3740         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3741         /* Disable both bits, even after read. */
3742         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3743                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3744                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3745 }
3746
3747 static int
3748 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3749 {
3750         u32 cmd;
3751         int j;
3752
3753         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3754                 /* Buffered flash, no erase needed */
3755                 return 0;
3756
3757         /* Build an erase command */
3758         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3759               BNX2_NVM_COMMAND_DOIT;
3760
3761         /* Need to clear DONE bit separately. */
3762         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3763
3764         /* Address of the NVRAM to read from. */
3765         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3766
3767         /* Issue an erase command. */
3768         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3769
3770         /* Wait for completion. */
3771         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3772                 u32 val;
3773
3774                 udelay(5);
3775
3776                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3777                 if (val & BNX2_NVM_COMMAND_DONE)
3778                         break;
3779         }
3780
3781         if (j >= NVRAM_TIMEOUT_COUNT)
3782                 return -EBUSY;
3783
3784         return 0;
3785 }
3786
/* Read one 32-bit word from NVRAM at 'offset' into ret_val (stored in
 * big-endian / NVRAM byte order).  cmd_flags carries the FIRST/LAST
 * framing bits for multi-word transfers.  The caller must already hold
 * the NVRAM lock and have access enabled.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Copy out the data in NVRAM (big-endian) order. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3830
3831
/* Write one 32-bit word (the 4 bytes at *val, in NVRAM byte order) to
 * NVRAM at 'offset'.  cmd_flags carries the FIRST/LAST framing bits
 * for multi-word programming sequences.  The caller must hold the
 * NVRAM lock and have write access enabled.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3875
/* Identify the attached flash/EEPROM device and record its parameters
 * in bp->flash_info and its size in bp->flash_size.
 *
 * On 5709 a fixed descriptor is used.  On older chips the NVM_CFG1
 * register is decoded: either the interface has already been
 * reconfigured (bit 30 set) and is matched by its config1 strap bits,
 * or it is matched against the raw strapping and then reconfigured.
 *
 * Returns 0 on success, -ENODEV if the flash type is unknown, or a
 * negative errno from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither table walk found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory; fall back to the
	 * descriptor's total size if it is zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3958
3959 static int
3960 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3961                 int buf_size)
3962 {
3963         int rc = 0;
3964         u32 cmd_flags, offset32, len32, extra;
3965
3966         if (buf_size == 0)
3967                 return 0;
3968
3969         /* Request access to the flash interface. */
3970         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3971                 return rc;
3972
3973         /* Enable access to flash interface */
3974         bnx2_enable_nvram_access(bp);
3975
3976         len32 = buf_size;
3977         offset32 = offset;
3978         extra = 0;
3979
3980         cmd_flags = 0;
3981
3982         if (offset32 & 3) {
3983                 u8 buf[4];
3984                 u32 pre_len;
3985
3986                 offset32 &= ~3;
3987                 pre_len = 4 - (offset & 3);
3988
3989                 if (pre_len >= len32) {
3990                         pre_len = len32;
3991                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3992                                     BNX2_NVM_COMMAND_LAST;
3993                 }
3994                 else {
3995                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3996                 }
3997
3998                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3999
4000                 if (rc)
4001                         return rc;
4002
4003                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4004
4005                 offset32 += 4;
4006                 ret_buf += pre_len;
4007                 len32 -= pre_len;
4008         }
4009         if (len32 & 3) {
4010                 extra = 4 - (len32 & 3);
4011                 len32 = (len32 + 4) & ~3;
4012         }
4013
4014         if (len32 == 4) {
4015                 u8 buf[4];
4016
4017                 if (cmd_flags)
4018                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4019                 else
4020                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4021                                     BNX2_NVM_COMMAND_LAST;
4022
4023                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4024
4025                 memcpy(ret_buf, buf, 4 - extra);
4026         }
4027         else if (len32 > 0) {
4028                 u8 buf[4];
4029
4030                 /* Read the first word. */
4031                 if (cmd_flags)
4032                         cmd_flags = 0;
4033                 else
4034                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4035
4036                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4037
4038                 /* Advance to the next dword. */
4039                 offset32 += 4;
4040                 ret_buf += 4;
4041                 len32 -= 4;
4042
4043                 while (len32 > 4 && rc == 0) {
4044                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4045
4046                         /* Advance to the next dword. */
4047                         offset32 += 4;
4048                         ret_buf += 4;
4049                         len32 -= 4;
4050                 }
4051
4052                 if (rc)
4053                         return rc;
4054
4055                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4056                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4057
4058                 memcpy(ret_buf, buf, 4 - extra);
4059         }
4060
4061         /* Disable access to flash interface */
4062         bnx2_disable_nvram_access(bp);
4063
4064         bnx2_release_nvram_lock(bp);
4065
4066         return rc;
4067 }
4068
4069 static int
4070 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4071                 int buf_size)
4072 {
4073         u32 written, offset32, len32;
4074         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4075         int rc = 0;
4076         int align_start, align_end;
4077
4078         buf = data_buf;
4079         offset32 = offset;
4080         len32 = buf_size;
4081         align_start = align_end = 0;
4082
4083         if ((align_start = (offset32 & 3))) {
4084                 offset32 &= ~3;
4085                 len32 += align_start;
4086                 if (len32 < 4)
4087                         len32 = 4;
4088                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4089                         return rc;
4090         }
4091
4092         if (len32 & 3) {
4093                 align_end = 4 - (len32 & 3);
4094                 len32 += align_end;
4095                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4096                         return rc;
4097         }
4098
4099         if (align_start || align_end) {
4100                 align_buf = kmalloc(len32, GFP_KERNEL);
4101                 if (align_buf == NULL)
4102                         return -ENOMEM;
4103                 if (align_start) {
4104                         memcpy(align_buf, start, 4);
4105                 }
4106                 if (align_end) {
4107                         memcpy(align_buf + len32 - 4, end, 4);
4108                 }
4109                 memcpy(align_buf + align_start, data_buf, buf_size);
4110                 buf = align_buf;
4111         }
4112
4113         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4114                 flash_buffer = kmalloc(264, GFP_KERNEL);
4115                 if (flash_buffer == NULL) {
4116                         rc = -ENOMEM;
4117                         goto nvram_write_end;
4118                 }
4119         }
4120
4121         written = 0;
4122         while ((written < len32) && (rc == 0)) {
4123                 u32 page_start, page_end, data_start, data_end;
4124                 u32 addr, cmd_flags;
4125                 int i;
4126
4127                 /* Find the page_start addr */
4128                 page_start = offset32 + written;
4129                 page_start -= (page_start % bp->flash_info->page_size);
4130                 /* Find the page_end addr */
4131                 page_end = page_start + bp->flash_info->page_size;
4132                 /* Find the data_start addr */
4133                 data_start = (written == 0) ? offset32 : page_start;
4134                 /* Find the data_end addr */
4135                 data_end = (page_end > offset32 + len32) ?
4136                         (offset32 + len32) : page_end;
4137
4138                 /* Request access to the flash interface. */
4139                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4140                         goto nvram_write_end;
4141
4142                 /* Enable access to flash interface */
4143                 bnx2_enable_nvram_access(bp);
4144
4145                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4146                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4147                         int j;
4148
4149                         /* Read the whole page into the buffer
4150                          * (non-buffer flash only) */
4151                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4152                                 if (j == (bp->flash_info->page_size - 4)) {
4153                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4154                                 }
4155                                 rc = bnx2_nvram_read_dword(bp,
4156                                         page_start + j,
4157                                         &flash_buffer[j],
4158                                         cmd_flags);
4159
4160                                 if (rc)
4161                                         goto nvram_write_end;
4162
4163                                 cmd_flags = 0;
4164                         }
4165                 }
4166
4167                 /* Enable writes to flash interface (unlock write-protect) */
4168                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4169                         goto nvram_write_end;
4170
4171                 /* Loop to write back the buffer data from page_start to
4172                  * data_start */
4173                 i = 0;
4174                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4175                         /* Erase the page */
4176                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4177                                 goto nvram_write_end;
4178
4179                         /* Re-enable the write again for the actual write */
4180                         bnx2_enable_nvram_write(bp);
4181
4182                         for (addr = page_start; addr < data_start;
4183                                 addr += 4, i += 4) {
4184
4185                                 rc = bnx2_nvram_write_dword(bp, addr,
4186                                         &flash_buffer[i], cmd_flags);
4187
4188                                 if (rc != 0)
4189                                         goto nvram_write_end;
4190
4191                                 cmd_flags = 0;
4192                         }
4193                 }
4194
4195                 /* Loop to write the new data from data_start to data_end */
4196                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4197                         if ((addr == page_end - 4) ||
4198                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4199                                  (addr == data_end - 4))) {
4200
4201                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4202                         }
4203                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4204                                 cmd_flags);
4205
4206                         if (rc != 0)
4207                                 goto nvram_write_end;
4208
4209                         cmd_flags = 0;
4210                         buf += 4;
4211                 }
4212
4213                 /* Loop to write back the buffer data from data_end
4214                  * to page_end */
4215                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4216                         for (addr = data_end; addr < page_end;
4217                                 addr += 4, i += 4) {
4218
4219                                 if (addr == page_end-4) {
4220                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4221                                 }
4222                                 rc = bnx2_nvram_write_dword(bp, addr,
4223                                         &flash_buffer[i], cmd_flags);
4224
4225                                 if (rc != 0)
4226                                         goto nvram_write_end;
4227
4228                                 cmd_flags = 0;
4229                         }
4230                 }
4231
4232                 /* Disable writes to flash interface (lock write-protect) */
4233                 bnx2_disable_nvram_write(bp);
4234
4235                 /* Disable access to flash interface */
4236                 bnx2_disable_nvram_access(bp);
4237                 bnx2_release_nvram_lock(bp);
4238
4239                 /* Increment written */
4240                 written += data_end - data_start;
4241         }
4242
4243 nvram_write_end:
4244         kfree(flash_buffer);
4245         kfree(align_buf);
4246         return rc;
4247 }
4248
4249 static void
4250 bnx2_init_remote_phy(struct bnx2 *bp)
4251 {
4252         u32 val;
4253
4254         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4255         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4256                 return;
4257
4258         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4259         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4260                 return;
4261
4262         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4263                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4264
4265                 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4266                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4267                         bp->phy_port = PORT_FIBRE;
4268                 else
4269                         bp->phy_port = PORT_TP;
4270
4271                 if (netif_running(bp->dev)) {
4272                         u32 sig;
4273
4274                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4275                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4276                         bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4277                 }
4278         }
4279 }
4280
/* Map the chip's MSI-X table and PBA into the register BAR via the two
 * spare GRC windows.  Called from bnx2_reset_chip() when MSI-X is in
 * use, since the window settings do not survive a chip reset. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4289
/* Perform a coordinated soft reset of the chip.  The bootcode firmware is
 * notified before and after the reset via bnx2_fw_sync(), and a driver
 * signature is left in shared memory so the firmware treats this as a
 * soft (driver-initiated) reset.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* reason code passed to the firmware.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; afterwards
		 * re-enable the register window and mailbox word swap in
		 * PCI config space. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through the core-reset-request bit in
		 * the PCICFG misc config register. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY capability; the port type may have changed
	 * across the reset, in which case the default link settings must
	 * be re-established. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The GRC windows pointing at the MSI-X structures were lost in
	 * the reset; restore them. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4395
/* Bring the chip up after a reset: program DMA and queue configuration,
 * load the internal RISC processors' firmware, configure host coalescing
 * (status/statistics blocks, interrupt moderation), initialize the rx
 * filter, and finally enable the chip.  Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the enable-relaxed-ordering bit in the PCI-X
		 * command register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the tx backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Forget any stale status-block indices from a previous run. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds (interrupt value in the upper
	 * half-word, non-interrupt value in the lower). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Program the additional status block used by the tx
		 * MSI-X vector. */
		u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4596
4597 static void
4598 bnx2_clear_ring_states(struct bnx2 *bp)
4599 {
4600         struct bnx2_napi *bnapi;
4601         struct bnx2_tx_ring_info *txr;
4602         struct bnx2_rx_ring_info *rxr;
4603         int i;
4604
4605         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4606                 bnapi = &bp->bnx2_napi[i];
4607                 txr = &bnapi->tx_ring;
4608                 rxr = &bnapi->rx_ring;
4609
4610                 txr->tx_cons = 0;
4611                 txr->hw_tx_cons = 0;
4612                 rxr->rx_prod_bseq = 0;
4613                 rxr->rx_prod = 0;
4614                 rxr->rx_cons = 0;
4615                 rxr->rx_pg_prod = 0;
4616                 rxr->rx_pg_cons = 0;
4617         }
4618 }
4619
4620 static void
4621 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4622 {
4623         u32 val, offset0, offset1, offset2, offset3;
4624         u32 cid_addr = GET_CID_ADDR(cid);
4625
4626         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4627                 offset0 = BNX2_L2CTX_TYPE_XI;
4628                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4629                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4630                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4631         } else {
4632                 offset0 = BNX2_L2CTX_TYPE;
4633                 offset1 = BNX2_L2CTX_CMD_TYPE;
4634                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4635                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4636         }
4637         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4638         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4639
4640         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4641         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4642
4643         val = (u64) txr->tx_desc_mapping >> 32;
4644         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4645
4646         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4647         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4648 }
4649
4650 static void
4651 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4652 {
4653         struct tx_bd *txbd;
4654         u32 cid = TX_CID;
4655         struct bnx2_napi *bnapi;
4656         struct bnx2_tx_ring_info *txr;
4657
4658         bnapi = &bp->bnx2_napi[ring_num];
4659         txr = &bnapi->tx_ring;
4660
4661         if (ring_num == 0)
4662                 cid = TX_CID;
4663         else
4664                 cid = TX_TSS_CID + ring_num - 1;
4665
4666         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4667
4668         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4669
4670         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4671         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4672
4673         txr->tx_prod = 0;
4674         txr->tx_prod_bseq = 0;
4675
4676         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4677         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4678
4679         bnx2_init_tx_context(bp, cid, txr);
4680 }
4681
4682 static void
4683 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4684                      int num_rings)
4685 {
4686         int i;
4687         struct rx_bd *rxbd;
4688
4689         for (i = 0; i < num_rings; i++) {
4690                 int j;
4691
4692                 rxbd = &rx_ring[i][0];
4693                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4694                         rxbd->rx_bd_len = buf_size;
4695                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4696                 }
4697                 if (i == (num_rings - 1))
4698                         j = 0;
4699                 else
4700                         j = i + 1;
4701                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4702                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4703         }
4704 }
4705
/* Initialize rx ring @ring_num: build the descriptor chain(s), program
 * the rx (and optional page) context in the chip, pre-fill the rings
 * with buffers, and publish the initial producer indices to the chip. */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; additional RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* The page ring is only used when rx_pg_ring_size != 0 (jumbo
	 * frames); otherwise the page buffer size stays 0. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Give the chip the DMA address of the first descriptor page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early if allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the rx ring with skbs; stop early if allocation fails. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox doorbell addresses for this ring. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4785
4786 static void
4787 bnx2_init_all_rings(struct bnx2 *bp)
4788 {
4789         int i;
4790
4791         bnx2_clear_ring_states(bp);
4792
4793         REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4794         for (i = 0; i < bp->num_tx_rings; i++)
4795                 bnx2_init_tx_ring(bp, i);
4796
4797         if (bp->num_tx_rings > 1)
4798                 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4799                        (TX_TSS_CID << 7));
4800
4801         for (i = 0; i < bp->num_rx_rings; i++)
4802                 bnx2_init_rx_ring(bp, i);
4803 }
4804
4805 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4806 {
4807         u32 max, num_rings = 1;
4808
4809         while (ring_size > MAX_RX_DESC_CNT) {
4810                 ring_size -= MAX_RX_DESC_CNT;
4811                 num_rings++;
4812         }
4813         /* round to next power of 2 */
4814         max = max_size;
4815         while ((max & num_rings) == 0)
4816                 max >>= 1;
4817
4818         if (num_rings != max)
4819                 max <<= 1;
4820
4821         return max;
4822 }
4823
/* Compute rx buffer sizes and ring geometry for @size descriptors at the
 * current MTU.  When a full packet's skb footprint would exceed one page
 * (and the chip supports it), a separate page ring is configured for the
 * payload and the first ring only holds the header portion. */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full skb footprint, used to decide if one page suffices. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per packet payload at this MTU. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* First-ring buffers only need to hold the header part. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4862
4863 static void
4864 bnx2_free_tx_skbs(struct bnx2 *bp)
4865 {
4866         int i;
4867
4868         for (i = 0; i < bp->num_tx_rings; i++) {
4869                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4870                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4871                 int j;
4872
4873                 if (txr->tx_buf_ring == NULL)
4874                         continue;
4875
4876                 for (j = 0; j < TX_DESC_CNT; ) {
4877                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4878                         struct sk_buff *skb = tx_buf->skb;
4879                         int k, last;
4880
4881                         if (skb == NULL) {
4882                                 j++;
4883                                 continue;
4884                         }
4885
4886                         pci_unmap_single(bp->pdev,
4887                                          pci_unmap_addr(tx_buf, mapping),
4888                         skb_headlen(skb), PCI_DMA_TODEVICE);
4889
4890                         tx_buf->skb = NULL;
4891
4892                         last = skb_shinfo(skb)->nr_frags;
4893                         for (k = 0; k < last; k++) {
4894                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4895                                 pci_unmap_page(bp->pdev,
4896                                         pci_unmap_addr(tx_buf, mapping),
4897                                         skb_shinfo(skb)->frags[j].size,
4898                                         PCI_DMA_TODEVICE);
4899                         }
4900                         dev_kfree_skb(skb);
4901                         j += k + 1;
4902                 }
4903         }
4904 }
4905
4906 static void
4907 bnx2_free_rx_skbs(struct bnx2 *bp)
4908 {
4909         int i;
4910
4911         for (i = 0; i < bp->num_rx_rings; i++) {
4912                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4913                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4914                 int j;
4915
4916                 if (rxr->rx_buf_ring == NULL)
4917                         return;
4918
4919                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4920                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4921                         struct sk_buff *skb = rx_buf->skb;
4922
4923                         if (skb == NULL)
4924                                 continue;
4925
4926                         pci_unmap_single(bp->pdev,
4927                                          pci_unmap_addr(rx_buf, mapping),
4928                                          bp->rx_buf_use_size,
4929                                          PCI_DMA_FROMDEVICE);
4930
4931                         rx_buf->skb = NULL;
4932
4933                         dev_kfree_skb(skb);
4934                 }
4935                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4936                         bnx2_free_rx_page(bp, rxr, j);
4937         }
4938 }
4939
/* Release all driver-owned tx and rx buffers; called on reset and
 * shutdown paths. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4946
4947 static int
4948 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4949 {
4950         int rc;
4951
4952         rc = bnx2_reset_chip(bp, reset_code);
4953         bnx2_free_skbs(bp);
4954         if (rc)
4955                 return rc;
4956
4957         if ((rc = bnx2_init_chip(bp)) != 0)
4958                 return rc;
4959
4960         bnx2_init_all_rings(bp);
4961         return 0;
4962 }
4963
/* Full NIC bring-up: reset the chip, then (re)initialize the PHY and
 * link state under phy_lock.  @reset_phy is passed through to
 * bnx2_init_phy() to request a PHY reset as well.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
4980
/* Ethtool register self-test.  For each table entry the read/write mask
 * (rw_mask) marks bits that must accept both 0 and 1 writes, while the
 * read-only mask (ro_mask) marks bits that must keep their original
 * value across writes.  The original register value is restored after
 * each probe.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Some registers only exist on pre-5709 chips. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Probe with all-zeros: writable bits must read back 0 and
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Probe with all-ones: writable bits must read back 1 and
		 * read-only bits must again be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5151
5152 static int
5153 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5154 {
5155         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5156                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5157         int i;
5158
5159         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5160                 u32 offset;
5161
5162                 for (offset = 0; offset < size; offset += 4) {
5163
5164                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5165
5166                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5167                                 test_pattern[i]) {
5168                                 return -ENODEV;
5169                         }
5170                 }
5171         }
5172         return 0;
5173 }
5174
5175 static int
5176 bnx2_test_memory(struct bnx2 *bp)
5177 {
5178         int ret = 0;
5179         int i;
5180         static struct mem_entry {
5181                 u32   offset;
5182                 u32   len;
5183         } mem_tbl_5706[] = {
5184                 { 0x60000,  0x4000 },
5185                 { 0xa0000,  0x3000 },
5186                 { 0xe0000,  0x4000 },
5187                 { 0x120000, 0x4000 },
5188                 { 0x1a0000, 0x4000 },
5189                 { 0x160000, 0x4000 },
5190                 { 0xffffffff, 0    },
5191         },
5192         mem_tbl_5709[] = {
5193                 { 0x60000,  0x4000 },
5194                 { 0xa0000,  0x3000 },
5195                 { 0xe0000,  0x4000 },
5196                 { 0x120000, 0x4000 },
5197                 { 0x1a0000, 0x4000 },
5198                 { 0xffffffff, 0    },
5199         };
5200         struct mem_entry *mem_tbl;
5201
5202         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5203                 mem_tbl = mem_tbl_5709;
5204         else
5205                 mem_tbl = mem_tbl_5706;
5206
5207         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5208                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5209                         mem_tbl[i].len)) != 0) {
5210                         return ret;
5211                 }
5212         }
5213
5214         return ret;
5215 }
5216
5217 #define BNX2_MAC_LOOPBACK       0
5218 #define BNX2_PHY_LOOPBACK       1
5219
5220 static int
5221 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5222 {
5223         unsigned int pkt_size, num_pkts, i;
5224         struct sk_buff *skb, *rx_skb;
5225         unsigned char *packet;
5226         u16 rx_start_idx, rx_idx;
5227         dma_addr_t map;
5228         struct tx_bd *txbd;
5229         struct sw_bd *rx_buf;
5230         struct l2_fhdr *rx_hdr;
5231         int ret = -ENODEV;
5232         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5233         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5234         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5235
5236         tx_napi = bnapi;
5237
5238         txr = &tx_napi->tx_ring;
5239         rxr = &bnapi->rx_ring;
5240         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5241                 bp->loopback = MAC_LOOPBACK;
5242                 bnx2_set_mac_loopback(bp);
5243         }
5244         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5245                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5246                         return 0;
5247
5248                 bp->loopback = PHY_LOOPBACK;
5249                 bnx2_set_phy_loopback(bp);
5250         }
5251         else
5252                 return -EINVAL;
5253
5254         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5255         skb = netdev_alloc_skb(bp->dev, pkt_size);
5256         if (!skb)
5257                 return -ENOMEM;
5258         packet = skb_put(skb, pkt_size);
5259         memcpy(packet, bp->dev->dev_addr, 6);
5260         memset(packet + 6, 0x0, 8);
5261         for (i = 14; i < pkt_size; i++)
5262                 packet[i] = (unsigned char) (i & 0xff);
5263
5264         map = pci_map_single(bp->pdev, skb->data, pkt_size,
5265                 PCI_DMA_TODEVICE);
5266
5267         REG_WR(bp, BNX2_HC_COMMAND,
5268                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5269
5270         REG_RD(bp, BNX2_HC_COMMAND);
5271
5272         udelay(5);
5273         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5274
5275         num_pkts = 0;
5276
5277         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5278
5279         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5280         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5281         txbd->tx_bd_mss_nbytes = pkt_size;
5282         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5283
5284         num_pkts++;
5285         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5286         txr->tx_prod_bseq += pkt_size;
5287
5288         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5289         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5290
5291         udelay(100);
5292
5293         REG_WR(bp, BNX2_HC_COMMAND,
5294                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5295
5296         REG_RD(bp, BNX2_HC_COMMAND);
5297
5298         udelay(5);
5299
5300         pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5301         dev_kfree_skb(skb);
5302
5303         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5304                 goto loopback_test_done;
5305
5306         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5307         if (rx_idx != rx_start_idx + num_pkts) {
5308                 goto loopback_test_done;
5309         }
5310
5311         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5312         rx_skb = rx_buf->skb;
5313
5314         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5315         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5316
5317         pci_dma_sync_single_for_cpu(bp->pdev,
5318                 pci_unmap_addr(rx_buf, mapping),
5319                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5320
5321         if (rx_hdr->l2_fhdr_status &
5322                 (L2_FHDR_ERRORS_BAD_CRC |
5323                 L2_FHDR_ERRORS_PHY_DECODE |
5324                 L2_FHDR_ERRORS_ALIGNMENT |
5325                 L2_FHDR_ERRORS_TOO_SHORT |
5326                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5327
5328                 goto loopback_test_done;
5329         }
5330
5331         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5332                 goto loopback_test_done;
5333         }
5334
5335         for (i = 14; i < pkt_size; i++) {
5336                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5337                         goto loopback_test_done;
5338                 }
5339         }
5340
5341         ret = 0;
5342
5343 loopback_test_done:
5344         bp->loopback = 0;
5345         return ret;
5346 }
5347
5348 #define BNX2_MAC_LOOPBACK_FAILED        1
5349 #define BNX2_PHY_LOOPBACK_FAILED        2
5350 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5351                                          BNX2_PHY_LOOPBACK_FAILED)
5352
5353 static int
5354 bnx2_test_loopback(struct bnx2 *bp)
5355 {
5356         int rc = 0;
5357
5358         if (!netif_running(bp->dev))
5359                 return BNX2_LOOPBACK_FAILED;
5360
5361         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5362         spin_lock_bh(&bp->phy_lock);
5363         bnx2_init_phy(bp, 1);
5364         spin_unlock_bh(&bp->phy_lock);
5365         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5366                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5367         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5368                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5369         return rc;
5370 }
5371
5372 #define NVRAM_SIZE 0x200
5373 #define CRC32_RESIDUAL 0xdebb20e3
5374
5375 static int
5376 bnx2_test_nvram(struct bnx2 *bp)
5377 {
5378         __be32 buf[NVRAM_SIZE / 4];
5379         u8 *data = (u8 *) buf;
5380         int rc = 0;
5381         u32 magic, csum;
5382
5383         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5384                 goto test_nvram_done;
5385
5386         magic = be32_to_cpu(buf[0]);
5387         if (magic != 0x669955aa) {
5388                 rc = -ENODEV;
5389                 goto test_nvram_done;
5390         }
5391
5392         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5393                 goto test_nvram_done;
5394
5395         csum = ether_crc_le(0x100, data);
5396         if (csum != CRC32_RESIDUAL) {
5397                 rc = -ENODEV;
5398                 goto test_nvram_done;
5399         }
5400
5401         csum = ether_crc_le(0x100, data + 0x100);
5402         if (csum != CRC32_RESIDUAL) {
5403                 rc = -ENODEV;
5404         }
5405
5406 test_nvram_done:
5407         return rc;
5408 }
5409
/* Ethtool link self-test: returns 0 when the link is up, -ENODEV
 * otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	/* With a remotely managed PHY, just report the cached state. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* BMSR is read twice — presumably the link-status bit is latched
	 * and the first read clears a stale indication; TODO confirm
	 * against the PHY documentation.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5432
5433 static int
5434 bnx2_test_intr(struct bnx2 *bp)
5435 {
5436         int i;
5437         u16 status_idx;
5438
5439         if (!netif_running(bp->dev))
5440                 return -ENODEV;
5441
5442         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5443
5444         /* This register is not touched during run-time. */
5445         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5446         REG_RD(bp, BNX2_HC_COMMAND);
5447
5448         for (i = 0; i < 10; i++) {
5449                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5450                         status_idx) {
5451
5452                         break;
5453                 }
5454
5455                 msleep_interruptible(10);
5456         }
5457         if (i < 10)
5458                 return 0;
5459
5460         return -ENODEV;
5461 }
5462
/* Determining link for parallel detection. */
/* Returns 1 when the 5706 SerDes parallel-detect probe indicates a
 * usable link, 0 otherwise.  Each probe reads a shadow/expansion
 * register twice — presumably because the status bits are latched;
 * TODO confirm against the PHY documentation.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Out of sync or receiving invalid RUDI -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5494
/* Periodic SerDes housekeeping for the 5706: drive the parallel-detect
 * state machine (force 1G full duplex when a non-autoneg partner is
 * detected, revert to autoneg when the partner starts negotiating) and
 * watch for loss of sync on an up link.  Runs from bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	/* While an autoneg attempt is pending, just count down and skip
	 * the link check this tick.
	 */
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Link down with autoneg on: if parallel detect
			 * sees a partner, force 1G full duplex.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): 0x17/0x15 look like vendor expansion
		 * registers and bit 0x20 presumably means the partner is
		 * now autonegotiating — confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner started autoneg; leave forced mode. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* AN_DBG is read twice — presumably latched status. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link up but out of sync: force the link down
			 * once, then let bnx2_set_link() re-evaluate on
			 * the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5556
/* Periodic SerDes housekeeping for the 5708: while the link is down
 * with autoneg enabled, alternate between forced 2.5G and autoneg to
 * find a partner.  Runs from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Without 2.5G capability there is no forced-speed fallback to
	 * manage; clear any pending countdown and leave.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Toggle between autoneg and forced 2.5G each time the
		 * previous attempt timed out without a link.
		 */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5589
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware RX-drop counter, applies the 5708 stats workaround, and
 * runs the per-chip SerDes state machines.  Re-arms itself at
 * bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* intr_sem is non-zero while interrupts are soft-disabled (e.g.
	 * during a reset); skip the work but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5621
5622 static int
5623 bnx2_request_irq(struct bnx2 *bp)
5624 {
5625         unsigned long flags;
5626         struct bnx2_irq *irq;
5627         int rc = 0, i;
5628
5629         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5630                 flags = 0;
5631         else
5632                 flags = IRQF_SHARED;
5633
5634         for (i = 0; i < bp->irq_nvecs; i++) {
5635                 irq = &bp->irq_tbl[i];
5636                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5637                                  &bp->bnx2_napi[i]);
5638                 if (rc)
5639                         break;
5640                 irq->requested = 1;
5641         }
5642         return rc;
5643 }
5644
5645 static void
5646 bnx2_free_irq(struct bnx2 *bp)
5647 {
5648         struct bnx2_irq *irq;
5649         int i;
5650
5651         for (i = 0; i < bp->irq_nvecs; i++) {
5652                 irq = &bp->irq_tbl[i];
5653                 if (irq->requested)
5654                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5655                 irq->requested = 0;
5656         }
5657         if (bp->flags & BNX2_FLAG_USING_MSI)
5658                 pci_disable_msi(bp->pdev);
5659         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5660                 pci_disable_msix(bp->pdev);
5661
5662         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5663 }
5664
/* Try to switch the device to MSI-X.  Programs the chip's MSI-X table
 * and PBA windows, points every vector at the shared one-shot handler,
 * and asks the PCI core for BNX2_MAX_MSIX_VEC vectors.  On failure the
 * function returns without changing bp->flags/irq_nvecs, so the caller
 * (bnx2_setup_int_mode) falls back to MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* All vectors use the same one-shot handler and device name. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		strcpy(bp->irq_tbl[i].name, bp->dev->name);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5693
/* Pick the interrupt mode for this open: default to INTx on the PCI
 * irq, upgrade to MSI-X if capable (and not disabled), otherwise try
 * plain MSI.  @dis_msi forces legacy INTx — used after a failed MSI
 * self-test.  Also fixes the ring counts for this configuration.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	/* Baseline: single shared INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp);

	/* MSI is only attempted when MSI-X was not enabled above. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			/* The 5709 supports one-shot MSI. */
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}
	bp->num_tx_rings = 1;
	bp->num_rx_rings = 1;
}
5721
5722 /* Called with rtnl_lock */
5723 static int
5724 bnx2_open(struct net_device *dev)
5725 {
5726         struct bnx2 *bp = netdev_priv(dev);
5727         int rc;
5728
5729         netif_carrier_off(dev);
5730
5731         bnx2_set_power_state(bp, PCI_D0);
5732         bnx2_disable_int(bp);
5733
5734         bnx2_setup_int_mode(bp, disable_msi);
5735         bnx2_napi_enable(bp);
5736         rc = bnx2_alloc_mem(bp);
5737         if (rc) {
5738                 bnx2_napi_disable(bp);
5739                 bnx2_free_mem(bp);
5740                 return rc;
5741         }
5742
5743         rc = bnx2_request_irq(bp);
5744
5745         if (rc) {
5746                 bnx2_napi_disable(bp);
5747                 bnx2_free_mem(bp);
5748                 return rc;
5749         }
5750
5751         rc = bnx2_init_nic(bp, 1);
5752
5753         if (rc) {
5754                 bnx2_napi_disable(bp);
5755                 bnx2_free_irq(bp);
5756                 bnx2_free_skbs(bp);
5757                 bnx2_free_mem(bp);
5758                 return rc;
5759         }
5760
5761         mod_timer(&bp->timer, jiffies + bp->current_interval);
5762
5763         atomic_set(&bp->intr_sem, 0);
5764
5765         bnx2_enable_int(bp);
5766
5767         if (bp->flags & BNX2_FLAG_USING_MSI) {
5768                 /* Test MSI to make sure it is working
5769                  * If MSI test fails, go back to INTx mode
5770                  */
5771                 if (bnx2_test_intr(bp) != 0) {
5772                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
5773                                " using MSI, switching to INTx mode. Please"
5774                                " report this failure to the PCI maintainer"
5775                                " and include system chipset information.\n",
5776                                bp->dev->name);
5777
5778                         bnx2_disable_int(bp);
5779                         bnx2_free_irq(bp);
5780
5781                         bnx2_setup_int_mode(bp, 1);
5782
5783                         rc = bnx2_init_nic(bp, 0);
5784
5785                         if (!rc)
5786                                 rc = bnx2_request_irq(bp);
5787
5788                         if (rc) {
5789                                 bnx2_napi_disable(bp);
5790                                 bnx2_free_skbs(bp);
5791                                 bnx2_free_mem(bp);
5792                                 del_timer_sync(&bp->timer);
5793                                 return rc;
5794                         }
5795                         bnx2_enable_int(bp);
5796                 }
5797         }
5798         if (bp->flags & BNX2_FLAG_USING_MSI)
5799                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5800         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5801                 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5802
5803         netif_start_queue(dev);
5804
5805         return 0;
5806 }
5807
/* Workqueue handler scheduled from bnx2_tx_timeout(): stop traffic,
 * fully re-initialize the NIC, and restart.  intr_sem is set to 1 so
 * the first interrupt after restart re-enables normal processing —
 * see bnx2_timer()/interrupt paths for how intr_sem gates work.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed between schedule and run. */
	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5823
/* netdev tx_timeout hook: defer the reset to process context via the
 * reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5832
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the datapath while swapping the VLAN group pointer so
	 * the RX path never sees a stale group. */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	/* Re-program RX filtering; VLAN tag stripping depends on vlgrp. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5848
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Maps the skb (head + page fragments) into a chain of TX buffer
 * descriptors on ring 0 and notifies the chip of the new producer
 * index.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

	/* The queue should have been stopped before the ring filled up;
	 * reaching this path indicates a flow-control bug. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag goes in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCP header offset relative to a standard IPv6
			 * header (non-zero when extension headers are
			 * present). */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset in 8-byte units, scattered across
				 * several flag and mss bit fields. */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 LSO writes into the headers below, so the
			 * header area must not be shared with a clone. */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prepare headers for hardware LSO: clear the IP
			 * checksum, set per-segment tot_len, and seed the
			 * TCP pseudo-header checksum. */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Extra IP/TCP option words are encoded in bits 8+. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	/* First BD carries the linear part of the skb. */
	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-wake immediately if the
	 * completion path freed enough descriptors in the meantime. */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5990
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* A queued reset task would race with teardown; cancel it first. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the wake-on-LAN policy. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6018
/* Combine a 64-bit hardware counter (split into ctr_hi/ctr_lo words)
 * into an unsigned long.  The whole expansion is parenthesized so the
 * macro composes safely inside larger expressions (the original form
 * mis-associated under, e.g., multiplication). */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32-bit word fits in an unsigned long. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6031
/* Translate the chip's DMA'ed statistics block into the generic
 * net_device_stats structure.  64-bit hardware counters are folded via
 * GET_NET_STATS (low word only on 32-bit hosts). */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block not allocated yet; return whatever was last filled. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0
	 * (counter presumably unreliable on those chips — see errata). */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include firmware-level drops in the missed count. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6107
6108 /* All ethtool functions called with rtnl_lock */
6109
/* ethtool: report supported modes, advertising, and current link state.
 * Called with rtnl_lock held (see comment above this section). */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A firmware-managed remote PHY can drive either medium. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* Snapshot link parameters atomically under the PHY lock. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up;
	 * -1 signals "unknown" to ethtool. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6168
/* ethtool: validate and apply new link settings.  The request is staged
 * in locals and committed to bp only after all checks pass, so a
 * rejected request leaves the configuration untouched. */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with a remote-PHY setup. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single speed selected: advertise everything
			 * valid for the chosen medium. */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced-speed mode: fibre supports only 1G/2.5G full
		 * duplex; copper rejects forced gigabit speeds. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the staged values and reprogram. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6252
6253 static void
6254 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6255 {
6256         struct bnx2 *bp = netdev_priv(dev);
6257
6258         strcpy(info->driver, DRV_MODULE_NAME);
6259         strcpy(info->version, DRV_MODULE_VERSION);
6260         strcpy(info->bus_info, pci_name(bp->pdev));
6261         strcpy(info->fw_version, bp->fw_version);
6262 }
6263
/* Size in bytes of the register dump produced by bnx2_get_regs() (32 KB). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* ethtool: buffer size the caller must allocate for get_regs. */
	return BNX2_REGDUMP_LEN;
}
6271
6272 static void
6273 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6274 {
6275         u32 *p = _p, i, offset;
6276         u8 *orig_p = _p;
6277         struct bnx2 *bp = netdev_priv(dev);
6278         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6279                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6280                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6281                                  0x1040, 0x1048, 0x1080, 0x10a4,
6282                                  0x1400, 0x1490, 0x1498, 0x14f0,
6283                                  0x1500, 0x155c, 0x1580, 0x15dc,
6284                                  0x1600, 0x1658, 0x1680, 0x16d8,
6285                                  0x1800, 0x1820, 0x1840, 0x1854,
6286                                  0x1880, 0x1894, 0x1900, 0x1984,
6287                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6288                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6289                                  0x2000, 0x2030, 0x23c0, 0x2400,
6290                                  0x2800, 0x2820, 0x2830, 0x2850,
6291                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6292                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6293                                  0x4080, 0x4090, 0x43c0, 0x4458,
6294                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6295                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6296                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6297                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6298                                  0x6800, 0x6848, 0x684c, 0x6860,
6299                                  0x6888, 0x6910, 0x8000 };
6300
6301         regs->version = 0;
6302
6303         memset(p, 0, BNX2_REGDUMP_LEN);
6304
6305         if (!netif_running(bp->dev))
6306                 return;
6307
6308         i = 0;
6309         offset = reg_boundaries[0];
6310         p += offset;
6311         while (offset < BNX2_REGDUMP_LEN) {
6312                 *p++ = REG_RD(bp, offset);
6313                 offset += 4;
6314                 if (offset == reg_boundaries[i + 1]) {
6315                         offset = reg_boundaries[i + 2];
6316                         p = (u32 *) (orig_p + offset);
6317                         i += 2;
6318                 }
6319         }
6320 }
6321
6322 static void
6323 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6324 {
6325         struct bnx2 *bp = netdev_priv(dev);
6326
6327         if (bp->flags & BNX2_FLAG_NO_WOL) {
6328                 wol->supported = 0;
6329                 wol->wolopts = 0;
6330         }
6331         else {
6332                 wol->supported = WAKE_MAGIC;
6333                 if (bp->wol)
6334                         wol->wolopts = WAKE_MAGIC;
6335                 else
6336                         wol->wolopts = 0;
6337         }
6338         memset(&wol->sopass, 0, sizeof(wol->sopass));
6339 }
6340
6341 static int
6342 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6343 {
6344         struct bnx2 *bp = netdev_priv(dev);
6345
6346         if (wol->wolopts & ~WAKE_MAGIC)
6347                 return -EINVAL;
6348
6349         if (wol->wolopts & WAKE_MAGIC) {
6350                 if (bp->flags & BNX2_FLAG_NO_WOL)
6351                         return -EINVAL;
6352
6353                 bp->wol = 1;
6354         }
6355         else {
6356                 bp->wol = 0;
6357         }
6358         return 0;
6359 }
6360
/* ethtool: restart link autonegotiation.  Only valid when autoneg is
 * enabled; remote-PHY setups delegate to the firmware-managed PHY. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around msleep(): a BH-disabled spinlock
		 * must not be held while sleeping. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout serviced by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6403
6404 static int
6405 bnx2_get_eeprom_len(struct net_device *dev)
6406 {
6407         struct bnx2 *bp = netdev_priv(dev);
6408
6409         if (bp->flash_info == NULL)
6410                 return 0;
6411
6412         return (int) bp->flash_size;
6413 }
6414
6415 static int
6416 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6417                 u8 *eebuf)
6418 {
6419         struct bnx2 *bp = netdev_priv(dev);
6420         int rc;
6421
6422         /* parameters already validated in ethtool_get_eeprom */
6423
6424         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6425
6426         return rc;
6427 }
6428
6429 static int
6430 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6431                 u8 *eebuf)
6432 {
6433         struct bnx2 *bp = netdev_priv(dev);
6434         int rc;
6435
6436         /* parameters already validated in ethtool_set_eeprom */
6437
6438         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6439
6440         return rc;
6441 }
6442
6443 static int
6444 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6445 {
6446         struct bnx2 *bp = netdev_priv(dev);
6447
6448         memset(coal, 0, sizeof(struct ethtool_coalesce));
6449
6450         coal->rx_coalesce_usecs = bp->rx_ticks;
6451         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6452         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6453         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6454
6455         coal->tx_coalesce_usecs = bp->tx_ticks;
6456         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6457         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6458         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6459
6460         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6461
6462         return 0;
6463 }
6464
/* ethtool: set interrupt-coalescing parameters.  Each value is clamped
 * to the hardware field width (0x3ff for tick counts, 0xff for
 * frame-count trip points) and the NIC is re-initialized if running. */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708: stats tick must be 0 or exactly one second. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Apply immediately by re-programming the chip. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6513
6514 static void
6515 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6516 {
6517         struct bnx2 *bp = netdev_priv(dev);
6518
6519         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6520         ering->rx_mini_max_pending = 0;
6521         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6522
6523         ering->rx_pending = bp->rx_ring_size;
6524         ering->rx_mini_pending = 0;
6525         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6526
6527         ering->tx_max_pending = MAX_TX_DESC_CNT;
6528         ering->tx_pending = bp->tx_ring_size;
6529 }
6530
6531 static int
6532 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6533 {
6534         if (netif_running(bp->dev)) {
6535                 bnx2_netif_stop(bp);
6536                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6537                 bnx2_free_skbs(bp);
6538                 bnx2_free_mem(bp);
6539         }
6540
6541         bnx2_set_rx_ring_size(bp, rx);
6542         bp->tx_ring_size = tx;
6543
6544         if (netif_running(bp->dev)) {
6545                 int rc;
6546
6547                 rc = bnx2_alloc_mem(bp);
6548                 if (rc)
6549                         return rc;
6550                 bnx2_init_nic(bp, 0);
6551                 bnx2_netif_start(bp);
6552         }
6553         return 0;
6554 }
6555
6556 static int
6557 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6558 {
6559         struct bnx2 *bp = netdev_priv(dev);
6560         int rc;
6561
6562         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6563                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6564                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6565
6566                 return -EINVAL;
6567         }
6568         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6569         return rc;
6570 }
6571
6572 static void
6573 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6574 {
6575         struct bnx2 *bp = netdev_priv(dev);
6576
6577         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6578         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6579         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6580 }
6581
6582 static int
6583 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6584 {
6585         struct bnx2 *bp = netdev_priv(dev);
6586
6587         bp->req_flow_ctrl = 0;
6588         if (epause->rx_pause)
6589                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6590         if (epause->tx_pause)
6591                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6592
6593         if (epause->autoneg) {
6594                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6595         }
6596         else {
6597                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6598         }
6599
6600         spin_lock_bh(&bp->phy_lock);
6601
6602         bnx2_setup_phy(bp, bp->phy_port);
6603
6604         spin_unlock_bh(&bp->phy_lock);
6605
6606         return 0;
6607 }
6608
6609 static u32
6610 bnx2_get_rx_csum(struct net_device *dev)
6611 {
6612         struct bnx2 *bp = netdev_priv(dev);
6613
6614         return bp->rx_csum;
6615 }
6616
6617 static int
6618 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6619 {
6620         struct bnx2 *bp = netdev_priv(dev);
6621
6622         bp->rx_csum = data;
6623         return 0;
6624 }
6625
6626 static int
6627 bnx2_set_tso(struct net_device *dev, u32 data)
6628 {
6629         struct bnx2 *bp = netdev_priv(dev);
6630
6631         if (data) {
6632                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6633                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6634                         dev->features |= NETIF_F_TSO6;
6635         } else
6636                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6637                                    NETIF_F_TSO_ECN);
6638         return 0;
6639 }
6640
/* Number of ethtool statistics; must stay in sync with the string
 * table below and the parallel offset table that follows it. */
#define BNX2_NUM_STATS 46

/* ethtool stat names, in the same order as the hardware-counter
 * offset table. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6693
/* 32-bit word offset of a counter within the chip's statistics block */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Statistics-block word offset of each ethtool counter, in the same
 * order as bnx2_stats_str_arr[].  For 64-bit counters the entry points
 * at the _hi word; bnx2_get_ethtool_stats() reads the _lo word at the
 * next offset.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6744
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skip) for 5706 A0-A2 and
 * 5708 A0; indexed like bnx2_stats_str_arr[].
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6755
/* Per-counter width in bytes (8, 4, or 0 = skip) for all other chip
 * revisions; only stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6763
#define BNX2_NUM_TESTS 6

/* Self-test names for ETH_SS_TEST; the index of each entry matches the
 * buf[] slot filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6776
6777 static int
6778 bnx2_get_sset_count(struct net_device *dev, int sset)
6779 {
6780         switch (sset) {
6781         case ETH_SS_TEST:
6782                 return BNX2_NUM_TESTS;
6783         case ETH_SS_STATS:
6784                 return BNX2_NUM_STATS;
6785         default:
6786                 return -EOPNOTSUPP;
6787         }
6788 }
6789
/* ethtool self_test handler.  Offline tests (register, memory, loopback)
 * are destructive: the NIC is quiesced and the chip reset into diagnostic
 * mode first.  Online tests (NVRAM, interrupt, link) run afterwards on
 * the restored device.  buf[i] is set non-zero for each failed test, in
 * the order of bnx2_tests_str_arr[].
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Stop traffic and put the chip in diagnostic mode before
                 * running the destructive tests.
                 */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* Loopback result is a bitmask of failed loopback modes. */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Leave the chip reset if the interface is down; otherwise
                 * fully re-initialize and restart traffic.
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp);
                }

                /* wait for link up, polling once a second for up to 7s */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6845
6846 static void
6847 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6848 {
6849         switch (stringset) {
6850         case ETH_SS_STATS:
6851                 memcpy(buf, bnx2_stats_str_arr,
6852                         sizeof(bnx2_stats_str_arr));
6853                 break;
6854         case ETH_SS_TEST:
6855                 memcpy(buf, bnx2_tests_str_arr,
6856                         sizeof(bnx2_tests_str_arr));
6857                 break;
6858         }
6859 }
6860
6861 static void
6862 bnx2_get_ethtool_stats(struct net_device *dev,
6863                 struct ethtool_stats *stats, u64 *buf)
6864 {
6865         struct bnx2 *bp = netdev_priv(dev);
6866         int i;
6867         u32 *hw_stats = (u32 *) bp->stats_blk;
6868         u8 *stats_len_arr = NULL;
6869
6870         if (hw_stats == NULL) {
6871                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6872                 return;
6873         }
6874
6875         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6876             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6877             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6878             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6879                 stats_len_arr = bnx2_5706_stats_len_arr;
6880         else
6881                 stats_len_arr = bnx2_5708_stats_len_arr;
6882
6883         for (i = 0; i < BNX2_NUM_STATS; i++) {
6884                 if (stats_len_arr[i] == 0) {
6885                         /* skip this counter */
6886                         buf[i] = 0;
6887                         continue;
6888                 }
6889                 if (stats_len_arr[i] == 4) {
6890                         /* 4-byte counter */
6891                         buf[i] = (u64)
6892                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6893                         continue;
6894                 }
6895                 /* 8-byte counter */
6896                 buf[i] = (((u64) *(hw_stats +
6897                                         bnx2_stats_offset_arr[i])) << 32) +
6898                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6899         }
6900 }
6901
6902 static int
6903 bnx2_phys_id(struct net_device *dev, u32 data)
6904 {
6905         struct bnx2 *bp = netdev_priv(dev);
6906         int i;
6907         u32 save;
6908
6909         if (data == 0)
6910                 data = 2;
6911
6912         save = REG_RD(bp, BNX2_MISC_CFG);
6913         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6914
6915         for (i = 0; i < (data * 2); i++) {
6916                 if ((i % 2) == 0) {
6917                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6918                 }
6919                 else {
6920                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6921                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6922                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6923                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6924                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6925                                 BNX2_EMAC_LED_TRAFFIC);
6926                 }
6927                 msleep_interruptible(500);
6928                 if (signal_pending(current))
6929                         break;
6930         }
6931         REG_WR(bp, BNX2_EMAC_LED, 0);
6932         REG_WR(bp, BNX2_MISC_CFG, save);
6933         return 0;
6934 }
6935
6936 static int
6937 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6938 {
6939         struct bnx2 *bp = netdev_priv(dev);
6940
6941         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6942                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6943         else
6944                 return (ethtool_op_set_tx_csum(dev, data));
6945 }
6946
/* ethtool entry points for bnx2.  Chip-specific handlers are defined in
 * this file; generic ethtool_op_* helpers are used where no special
 * handling is required.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
6977
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  Returns 0 or
 * a negative errno; -EAGAIN if the interface is down, -EOPNOTSUPP for
 * unsupported commands or when the PHY is owned by remote firmware.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                /* Direct MDIO access is unavailable when management
                 * firmware controls the PHY.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* phy_lock serializes all MDIO register accesses. */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* PHY register writes are privileged. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
7031
7032 /* Called with rtnl_lock */
7033 static int
7034 bnx2_change_mac_addr(struct net_device *dev, void *p)
7035 {
7036         struct sockaddr *addr = p;
7037         struct bnx2 *bp = netdev_priv(dev);
7038
7039         if (!is_valid_ether_addr(addr->sa_data))
7040                 return -EINVAL;
7041
7042         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7043         if (netif_running(dev))
7044                 bnx2_set_mac_addr(bp);
7045
7046         return 0;
7047 }
7048
7049 /* Called with rtnl_lock */
7050 static int
7051 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7052 {
7053         struct bnx2 *bp = netdev_priv(dev);
7054
7055         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7056                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7057                 return -EINVAL;
7058
7059         dev->mtu = new_mtu;
7060         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7061 }
7062
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler with the IRQ line
 * masked so it cannot fire concurrently.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        unsigned int irq = bp->pdev->irq;

        disable_irq(irq);
        bnx2_interrupt(irq, dev);
        enable_irq(irq);
}
#endif
7074
7075 static void __devinit
7076 bnx2_get_5709_media(struct bnx2 *bp)
7077 {
7078         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7079         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7080         u32 strap;
7081
7082         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7083                 return;
7084         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7085                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7086                 return;
7087         }
7088
7089         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7090                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7091         else
7092                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7093
7094         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7095                 switch (strap) {
7096                 case 0x4:
7097                 case 0x5:
7098                 case 0x6:
7099                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7100                         return;
7101                 }
7102         } else {
7103                 switch (strap) {
7104                 case 0x1:
7105                 case 0x2:
7106                 case 0x4:
7107                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7108                         return;
7109                 }
7110         }
7111 }
7112
7113 static void __devinit
7114 bnx2_get_pci_speed(struct bnx2 *bp)
7115 {
7116         u32 reg;
7117
7118         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7119         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7120                 u32 clkreg;
7121
7122                 bp->flags |= BNX2_FLAG_PCIX;
7123
7124                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7125
7126                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7127                 switch (clkreg) {
7128                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7129                         bp->bus_speed_mhz = 133;
7130                         break;
7131
7132                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7133                         bp->bus_speed_mhz = 100;
7134                         break;
7135
7136                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7137                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7138                         bp->bus_speed_mhz = 66;
7139                         break;
7140
7141                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7142                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7143                         bp->bus_speed_mhz = 50;
7144                         break;
7145
7146                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7147                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7148                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7149                         bp->bus_speed_mhz = 33;
7150                         break;
7151                 }
7152         }
7153         else {
7154                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7155                         bp->bus_speed_mhz = 66;
7156                 else
7157                         bp->bus_speed_mhz = 33;
7158         }
7159
7160         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7161                 bp->flags |= BNX2_FLAG_PCI_32BIT;
7162
7163 }
7164
7165 static int __devinit
7166 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7167 {
7168         struct bnx2 *bp;
7169         unsigned long mem_len;
7170         int rc, i, j;
7171         u32 reg;
7172         u64 dma_mask, persist_dma_mask;
7173
7174         SET_NETDEV_DEV(dev, &pdev->dev);
7175         bp = netdev_priv(dev);
7176
7177         bp->flags = 0;
7178         bp->phy_flags = 0;
7179
7180         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7181         rc = pci_enable_device(pdev);
7182         if (rc) {
7183                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7184                 goto err_out;
7185         }
7186
7187         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7188                 dev_err(&pdev->dev,
7189                         "Cannot find PCI device base address, aborting.\n");
7190                 rc = -ENODEV;
7191                 goto err_out_disable;
7192         }
7193
7194         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7195         if (rc) {
7196                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7197                 goto err_out_disable;
7198         }
7199
7200         pci_set_master(pdev);
7201         pci_save_state(pdev);
7202
7203         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7204         if (bp->pm_cap == 0) {
7205                 dev_err(&pdev->dev,
7206                         "Cannot find power management capability, aborting.\n");
7207                 rc = -EIO;
7208                 goto err_out_release;
7209         }
7210
7211         bp->dev = dev;
7212         bp->pdev = pdev;
7213
7214         spin_lock_init(&bp->phy_lock);
7215         spin_lock_init(&bp->indirect_lock);
7216         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7217
7218         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7219         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7220         dev->mem_end = dev->mem_start + mem_len;
7221         dev->irq = pdev->irq;
7222
7223         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7224
7225         if (!bp->regview) {
7226                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7227                 rc = -ENOMEM;
7228                 goto err_out_release;
7229         }
7230
7231         /* Configure byte swap and enable write to the reg_window registers.
7232          * Rely on CPU to do target byte swapping on big endian systems
7233          * The chip's target access swapping will not swap all accesses
7234          */
7235         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7236                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7237                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7238
7239         bnx2_set_power_state(bp, PCI_D0);
7240
7241         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7242
7243         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7244                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7245                         dev_err(&pdev->dev,
7246                                 "Cannot find PCIE capability, aborting.\n");
7247                         rc = -EIO;
7248                         goto err_out_unmap;
7249                 }
7250                 bp->flags |= BNX2_FLAG_PCIE;
7251                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7252                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7253         } else {
7254                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7255                 if (bp->pcix_cap == 0) {
7256                         dev_err(&pdev->dev,
7257                                 "Cannot find PCIX capability, aborting.\n");
7258                         rc = -EIO;
7259                         goto err_out_unmap;
7260                 }
7261         }
7262
7263         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7264                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7265                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7266         }
7267
7268         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7269                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7270                         bp->flags |= BNX2_FLAG_MSI_CAP;
7271         }
7272
7273         /* 5708 cannot support DMA addresses > 40-bit.  */
7274         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7275                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7276         else
7277                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7278
7279         /* Configure DMA attributes. */
7280         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7281                 dev->features |= NETIF_F_HIGHDMA;
7282                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7283                 if (rc) {
7284                         dev_err(&pdev->dev,
7285                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7286                         goto err_out_unmap;
7287                 }
7288         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7289                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7290                 goto err_out_unmap;
7291         }
7292
7293         if (!(bp->flags & BNX2_FLAG_PCIE))
7294                 bnx2_get_pci_speed(bp);
7295
7296         /* 5706A0 may falsely detect SERR and PERR. */
7297         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7298                 reg = REG_RD(bp, PCI_COMMAND);
7299                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7300                 REG_WR(bp, PCI_COMMAND, reg);
7301         }
7302         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7303                 !(bp->flags & BNX2_FLAG_PCIX)) {
7304
7305                 dev_err(&pdev->dev,
7306                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7307                 goto err_out_unmap;
7308         }
7309
7310         bnx2_init_nvram(bp);
7311
7312         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7313
7314         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7315             BNX2_SHM_HDR_SIGNATURE_SIG) {
7316                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7317
7318                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7319         } else
7320                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7321
7322         /* Get the permanent MAC address.  First we need to make sure the
7323          * firmware is actually running.
7324          */
7325         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7326
7327         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7328             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7329                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7330                 rc = -ENODEV;
7331                 goto err_out_unmap;
7332         }
7333
7334         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7335         for (i = 0, j = 0; i < 3; i++) {
7336                 u8 num, k, skip0;
7337
7338                 num = (u8) (reg >> (24 - (i * 8)));
7339                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7340                         if (num >= k || !skip0 || k == 1) {
7341                                 bp->fw_version[j++] = (num / k) + '0';
7342                                 skip0 = 0;
7343                         }
7344                 }
7345                 if (i != 2)
7346                         bp->fw_version[j++] = '.';
7347         }
7348         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7349         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7350                 bp->wol = 1;
7351
7352         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7353                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7354
7355                 for (i = 0; i < 30; i++) {
7356                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7357                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7358                                 break;
7359                         msleep(10);
7360                 }
7361         }
7362         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7363         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7364         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7365             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7366                 int i;
7367                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7368
7369                 bp->fw_version[j++] = ' ';
7370                 for (i = 0; i < 3; i++) {
7371                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7372                         reg = swab32(reg);
7373                         memcpy(&bp->fw_version[j], &reg, 4);
7374                         j += 4;
7375                 }
7376         }
7377
7378         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7379         bp->mac_addr[0] = (u8) (reg >> 8);
7380         bp->mac_addr[1] = (u8) reg;
7381
7382         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7383         bp->mac_addr[2] = (u8) (reg >> 24);
7384         bp->mac_addr[3] = (u8) (reg >> 16);
7385         bp->mac_addr[4] = (u8) (reg >> 8);
7386         bp->mac_addr[5] = (u8) reg;
7387
7388         bp->tx_ring_size = MAX_TX_DESC_CNT;
7389         bnx2_set_rx_ring_size(bp, 255);
7390
7391         bp->rx_csum = 1;
7392
7393         bp->tx_quick_cons_trip_int = 20;
7394         bp->tx_quick_cons_trip = 20;
7395         bp->tx_ticks_int = 80;
7396         bp->tx_ticks = 80;
7397
7398         bp->rx_quick_cons_trip_int = 6;
7399         bp->rx_quick_cons_trip = 6;
7400         bp->rx_ticks_int = 18;
7401         bp->rx_ticks = 18;
7402
7403         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7404
7405         bp->timer_interval =  HZ;
7406         bp->current_interval =  HZ;
7407
7408         bp->phy_addr = 1;
7409
7410         /* Disable WOL support if we are running on a SERDES chip. */
7411         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7412                 bnx2_get_5709_media(bp);
7413         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7414                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7415
7416         bp->phy_port = PORT_TP;
7417         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7418                 bp->phy_port = PORT_FIBRE;
7419                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7420                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7421                         bp->flags |= BNX2_FLAG_NO_WOL;
7422                         bp->wol = 0;
7423                 }
7424                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7425                         /* Don't do parallel detect on this board because of
7426                          * some board problems.  The link will not go down
7427                          * if we do parallel detect.
7428                          */
7429                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7430                             pdev->subsystem_device == 0x310c)
7431                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7432                 } else {
7433                         bp->phy_addr = 2;
7434                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7435                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7436                 }
7437                 bnx2_init_remote_phy(bp);
7438
7439         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7440                    CHIP_NUM(bp) == CHIP_NUM_5708)
7441                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7442         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7443                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7444                   CHIP_REV(bp) == CHIP_REV_Bx))
7445                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7446
7447         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7448             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7449             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7450                 bp->flags |= BNX2_FLAG_NO_WOL;
7451                 bp->wol = 0;
7452         }
7453
7454         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7455                 bp->tx_quick_cons_trip_int =
7456                         bp->tx_quick_cons_trip;
7457                 bp->tx_ticks_int = bp->tx_ticks;
7458                 bp->rx_quick_cons_trip_int =
7459                         bp->rx_quick_cons_trip;
7460                 bp->rx_ticks_int = bp->rx_ticks;
7461                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7462                 bp->com_ticks_int = bp->com_ticks;
7463                 bp->cmd_ticks_int = bp->cmd_ticks;
7464         }
7465
7466         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7467          *
7468          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7469          * with byte enables disabled on the unused 32-bit word.  This is legal
7470          * but causes problems on the AMD 8132 which will eventually stop
7471          * responding after a while.
7472          *
7473          * AMD believes this incompatibility is unique to the 5706, and
7474          * prefers to locally disable MSI rather than globally disabling it.
7475          */
7476         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7477                 struct pci_dev *amd_8132 = NULL;
7478
7479                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7480                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7481                                                   amd_8132))) {
7482
7483                         if (amd_8132->revision >= 0x10 &&
7484                             amd_8132->revision <= 0x13) {
7485                                 disable_msi = 1;
7486                                 pci_dev_put(amd_8132);
7487                                 break;
7488                         }
7489                 }
7490         }
7491
7492         bnx2_set_default_link(bp);
7493         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7494
7495         init_timer(&bp->timer);
7496         bp->timer.expires = RUN_AT(bp->timer_interval);
7497         bp->timer.data = (unsigned long) bp;
7498         bp->timer.function = bnx2_timer;
7499
7500         return 0;
7501
7502 err_out_unmap:
7503         if (bp->regview) {
7504                 iounmap(bp->regview);
7505                 bp->regview = NULL;
7506         }
7507
7508 err_out_release:
7509         pci_release_regions(pdev);
7510
7511 err_out_disable:
7512         pci_disable_device(pdev);
7513         pci_set_drvdata(pdev, NULL);
7514
7515 err_out:
7516         return rc;
7517 }
7518
7519 static char * __devinit
7520 bnx2_bus_string(struct bnx2 *bp, char *str)
7521 {
7522         char *s = str;
7523
7524         if (bp->flags & BNX2_FLAG_PCIE) {
7525                 s += sprintf(s, "PCI Express");
7526         } else {
7527                 s += sprintf(s, "PCI");
7528                 if (bp->flags & BNX2_FLAG_PCIX)
7529                         s += sprintf(s, "-X");
7530                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7531                         s += sprintf(s, " 32-bit");
7532                 else
7533                         s += sprintf(s, " 64-bit");
7534                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7535         }
7536         return str;
7537 }
7538
7539 static void __devinit
7540 bnx2_init_napi(struct bnx2 *bp)
7541 {
7542         int i;
7543
7544         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7545                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7546                 int (*poll)(struct napi_struct *, int);
7547
7548                 if (i == 0)
7549                         poll = bnx2_poll;
7550                 else
7551                         poll = bnx2_poll_msix;
7552
7553                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7554                 bnapi->bp = bp;
7555         }
7556 }
7557
/* PCI probe entry point: allocate the net_device, initialize the board,
 * wire up the netdev callbacks, and register with the networking core.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here are released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner once, on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Map BARs, reset the chip, and read NVRAM configuration. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	/* NAPI contexts must exist before the device can be opened. */
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from NVRAM by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		/* Undo everything bnx2_init_board() set up. */
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7645
/* PCI remove entry point: tear down in the reverse order of probe.
 * The netdev must be unregistered before the register mapping and
 * PCI resources are released, since close() may still run until then.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred reset/link work is still pending. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7664
/* PM suspend hook: quiesce the interface, tell the firmware whether to
 * arm Wake-on-LAN, and drop the chip into the requested power state.
 * Returns 0 (the quiesce steps here have no failure path to report).
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware unload message based on WoL capability
	 * (some 5708 revisions cannot wake) and user configuration.
	 */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7695
/* PM resume hook: restore PCI config state, power the chip back up,
 * and re-initialize the NIC if the interface was running at suspend.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init (reset_phy = 1): suspend reset the chip. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7712
7713 /**
7714  * bnx2_io_error_detected - called when PCI error is detected
7715  * @pdev: Pointer to PCI device
7716  * @state: The current pci connection state
7717  *
7718  * This function is called after a PCI bus error affecting
7719  * this device has been detected.
7720  */
7721 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7722                                                pci_channel_state_t state)
7723 {
7724         struct net_device *dev = pci_get_drvdata(pdev);
7725         struct bnx2 *bp = netdev_priv(dev);
7726
7727         rtnl_lock();
7728         netif_device_detach(dev);
7729
7730         if (netif_running(dev)) {
7731                 bnx2_netif_stop(bp);
7732                 del_timer_sync(&bp->timer);
7733                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7734         }
7735
7736         pci_disable_device(pdev);
7737         rtnl_unlock();
7738
7739         /* Request a slot slot reset. */
7740         return PCI_ERS_RESULT_NEED_RESET;
7741 }
7742
7743 /**
7744  * bnx2_io_slot_reset - called after the pci bus has been reset.
7745  * @pdev: Pointer to PCI device
7746  *
7747  * Restart the card from scratch, as if from a cold-boot.
7748  */
7749 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7750 {
7751         struct net_device *dev = pci_get_drvdata(pdev);
7752         struct bnx2 *bp = netdev_priv(dev);
7753
7754         rtnl_lock();
7755         if (pci_enable_device(pdev)) {
7756                 dev_err(&pdev->dev,
7757                         "Cannot re-enable PCI device after reset.\n");
7758                 rtnl_unlock();
7759                 return PCI_ERS_RESULT_DISCONNECT;
7760         }
7761         pci_set_master(pdev);
7762         pci_restore_state(pdev);
7763
7764         if (netif_running(dev)) {
7765                 bnx2_set_power_state(bp, PCI_D0);
7766                 bnx2_init_nic(bp, 1);
7767         }
7768
7769         rtnl_unlock();
7770         return PCI_ERS_RESULT_RECOVERED;
7771 }
7772
7773 /**
7774  * bnx2_io_resume - called when traffic can start flowing again.
7775  * @pdev: Pointer to PCI device
7776  *
7777  * This callback is called when the error recovery driver tells us that
7778  * its OK to resume normal operation.
7779  */
7780 static void bnx2_io_resume(struct pci_dev *pdev)
7781 {
7782         struct net_device *dev = pci_get_drvdata(pdev);
7783         struct bnx2 *bp = netdev_priv(dev);
7784
7785         rtnl_lock();
7786         if (netif_running(dev))
7787                 bnx2_netif_start(bp);
7788
7789         netif_device_attach(dev);
7790         rtnl_unlock();
7791 }
7792
/* PCI error-recovery (EEH/AER) callbacks for this driver. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

/* Top-level PCI driver descriptor tying probe/remove/PM/error
 * handling together for the bnx2 device-ID table.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7808
/* Module init: register the PCI driver; probing happens per device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

/* Module exit: unregister the driver, triggering remove for all devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
7821
7822
7823