bnx2: Optimize fast-path tx and rx work.
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Scratch buffer size for firmware images (64 kB); consumers are
 * later in the file (zlib-based firmware loader) -- not visible here.
 */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.6"
#define DRV_MODULE_RELDATE      "May 16, 2008"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board types.  The values index board_info[] below and are
 * stored as driver_data in bnx2_pci_tbl[].
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* indexed by board_t, above */
/* Human-readable adapter names; order must stay in sync with board_t. */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI ID table.  HP OEM variants are matched first by subsystem
 * vendor/device ID; the wildcard (PCI_ANY_ID) generic entries for the
 * same chip must come after them so the more specific match wins.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Table of known NVRAM devices.  NOTE(review): the first two fields are
 * presumably strapping/config match values used to identify the part at
 * probe time -- confirm against struct flash_spec in bnx2.h.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single known flash layout, so it gets a dedicated
 * spec instead of a flash_table[] lookup.
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = txr->tx_prod - txr->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a device register indirectly through the PCI config window.
 * indirect_lock serializes use of the shared window registers.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
258
/* Write a device register indirectly through the PCI config window.
 * Counterpart of bnx2_reg_rd_ind(); same locking rules.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
/* Write a 32-bit value into firmware shared memory at @offset. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the EMAC is auto-polling the PHY, polling is temporarily turned
 * off around the manual transaction and restored afterwards.  Returns
 * 0 on success or -EBUSY (with *@val zeroed) if the transaction never
 * completes within the 50 x 10us poll window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* Suspend hardware auto-polling so it does not race with our
         * manual MDIO transaction.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the read: PHY address, register, and START_BUSY. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion; START_BUSY clears when done. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we turned it off above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
361
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and restored afterwards.  Returns 0 on success or -EBUSY
 * if START_BUSY never clears within the 50 x 10us poll window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Suspend hardware auto-polling during the manual write. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the write: PHY address, register, data, START_BUSY. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion; START_BUSY clears when done. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we turned it off above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
/* Unmask interrupts on every vector.
 *
 * Each vector gets two int-ack writes: the first validates the last
 * seen status index while keeping MASK_INT set, the second clears the
 * mask.  NOTE(review): the two-step sequence looks like a chip ack
 * requirement -- confirm against the 5706/5708/5709 programming guide.
 * The final COAL_NOW write presumably forces an immediate coalescing
 * cycle so any pending events are delivered -- confirm likewise.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
445
/* Disable device interrupts and wait for in-flight handlers on all
 * vectors to finish.  intr_sem is raised first so bnx2_netif_start()
 * will not re-enable interrupts until the count is balanced.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
/* Stop fast-path activity: synchronously mask interrupts, halt NAPI
 * polling, and disable the TX queue.  trans_start is refreshed so the
 * netdev watchdog does not report a false TX timeout while stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
485
486 static void
487 bnx2_netif_start(struct bnx2 *bp)
488 {
489         if (atomic_dec_and_test(&bp->intr_sem)) {
490                 if (netif_running(bp->dev)) {
491                         netif_wake_queue(bp->dev);
492                         bnx2_napi_enable(bp);
493                         bnx2_enable_int(bp);
494                 }
495         }
496 }
497
498 static void
499 bnx2_free_tx_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->num_tx_rings; i++) {
504                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
505                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
506
507                 if (txr->tx_desc_ring) {
508                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
509                                             txr->tx_desc_ring,
510                                             txr->tx_desc_mapping);
511                         txr->tx_desc_ring = NULL;
512                 }
513                 kfree(txr->tx_buf_ring);
514                 txr->tx_buf_ring = NULL;
515         }
516 }
517
518 static void
519 bnx2_free_rx_mem(struct bnx2 *bp)
520 {
521         int i;
522
523         for (i = 0; i < bp->num_rx_rings; i++) {
524                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
525                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
526                 int j;
527
528                 for (j = 0; j < bp->rx_max_ring; j++) {
529                         if (rxr->rx_desc_ring[j])
530                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
531                                                     rxr->rx_desc_ring[j],
532                                                     rxr->rx_desc_mapping[j]);
533                         rxr->rx_desc_ring[j] = NULL;
534                 }
535                 if (rxr->rx_buf_ring)
536                         vfree(rxr->rx_buf_ring);
537                 rxr->rx_buf_ring = NULL;
538
539                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
540                         if (rxr->rx_pg_desc_ring[j])
541                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
542                                                     rxr->rx_pg_desc_ring[i],
543                                                     rxr->rx_pg_desc_mapping[i]);
544                         rxr->rx_pg_desc_ring[i] = NULL;
545                 }
546                 if (rxr->rx_pg_ring)
547                         vfree(rxr->rx_pg_ring);
548                 rxr->rx_pg_ring = NULL;
549         }
550 }
551
552 static int
553 bnx2_alloc_tx_mem(struct bnx2 *bp)
554 {
555         int i;
556
557         for (i = 0; i < bp->num_tx_rings; i++) {
558                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
559                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
560
561                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
562                 if (txr->tx_buf_ring == NULL)
563                         return -ENOMEM;
564
565                 txr->tx_desc_ring =
566                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
567                                              &txr->tx_desc_mapping);
568                 if (txr->tx_desc_ring == NULL)
569                         return -ENOMEM;
570         }
571         return 0;
572 }
573
/* Allocate, for every RX queue, the software buffer rings (vmalloc,
 * zeroed by hand) and the coherent DMA descriptor rings, plus the
 * optional page rings when bp->rx_pg_ring_size is non-zero.  Returns
 * 0 or -ENOMEM; on failure the caller cleans up via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                /* Page rings are only used when jumbo/paged RX is
                 * configured (rx_pg_ring_size != 0).
                 */
                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
622
/* Free everything bnx2_alloc_mem() allocated: TX/RX ring memory, the
 * 5709 context pages, and the combined status + statistics block.
 * Safe to call after a partially failed allocation.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                /* The statistics block lives in the same allocation
                 * (see bnx2_alloc_mem()), so clear stats_blk too.
                 */
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
648
/* Allocate all device memory: the combined status + statistics block,
 * the 5709 context pages, and the RX/TX rings.  Returns 0 or -ENOMEM;
 * any partial allocation is unwound through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* With MSI-X, reserve one aligned sub-block per HW vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Remaining vectors each get their own aligned MSI-X
                 * status sub-block within the same allocation.
                 */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs host-resident context memory (8 kB total). */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
725
726 static void
727 bnx2_report_fw_link(struct bnx2 *bp)
728 {
729         u32 fw_link_status = 0;
730
731         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
732                 return;
733
734         if (bp->link_up) {
735                 u32 bmsr;
736
737                 switch (bp->line_speed) {
738                 case SPEED_10:
739                         if (bp->duplex == DUPLEX_HALF)
740                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
741                         else
742                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
743                         break;
744                 case SPEED_100:
745                         if (bp->duplex == DUPLEX_HALF)
746                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
747                         else
748                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
749                         break;
750                 case SPEED_1000:
751                         if (bp->duplex == DUPLEX_HALF)
752                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
753                         else
754                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
755                         break;
756                 case SPEED_2500:
757                         if (bp->duplex == DUPLEX_HALF)
758                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
759                         else
760                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
761                         break;
762                 }
763
764                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
765
766                 if (bp->autoneg) {
767                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
768
769                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
770                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
771
772                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
773                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
774                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
775                         else
776                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
777                 }
778         }
779         else
780                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
781
782         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
783 }
784
785 static char *
786 bnx2_xceiver_str(struct bnx2 *bp)
787 {
788         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
789                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
790                  "Copper"));
791 }
792
/* Log the link state (speed, duplex, flow control) to the kernel log,
 * update the carrier flag, and propagate the state to the firmware via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                /* Flow control: RX-only, TX-only, or both. */
                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
829
/* Resolve the effective flow control setting (bp->flow_ctrl).
 *
 * If speed/flow-control autonegotiation is not fully enabled, the
 * requested setting is applied directly (full duplex only).  Otherwise
 * the result is derived from the local and link-partner pause
 * advertisements.  bp->duplex must already be resolved.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause resolution applies only when both speed and flow control
	 * are autonegotiated; otherwise honor the forced request.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only used on full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the negotiated pause result
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate the 1000BASE-X pause bits onto the copper
	 * (ADVERTISE_PAUSE_*) bit positions so a single resolution
	 * table below handles both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
905
/* Record link speed and duplex for the 5709 SerDes PHY after link-up.
 * The autoneg result is read from the GP status block; forced settings
 * are copied from the requested values when speed autoneg is off.
 * Returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP status registers live in their own register block; switch
	 * to it for the read and restore the default block afterwards.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	/* Speed codes not listed below leave bp->line_speed unchanged. */
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
944
945 static int
946 bnx2_5708s_linkup(struct bnx2 *bp)
947 {
948         u32 val;
949
950         bp->link_up = 1;
951         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
952         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
953                 case BCM5708S_1000X_STAT1_SPEED_10:
954                         bp->line_speed = SPEED_10;
955                         break;
956                 case BCM5708S_1000X_STAT1_SPEED_100:
957                         bp->line_speed = SPEED_100;
958                         break;
959                 case BCM5708S_1000X_STAT1_SPEED_1G:
960                         bp->line_speed = SPEED_1000;
961                         break;
962                 case BCM5708S_1000X_STAT1_SPEED_2G5:
963                         bp->line_speed = SPEED_2500;
964                         break;
965         }
966         if (val & BCM5708S_1000X_STAT1_FD)
967                 bp->duplex = DUPLEX_FULL;
968         else
969                 bp->duplex = DUPLEX_HALF;
970
971         return 0;
972 }
973
974 static int
975 bnx2_5706s_linkup(struct bnx2 *bp)
976 {
977         u32 bmcr, local_adv, remote_adv, common;
978
979         bp->link_up = 1;
980         bp->line_speed = SPEED_1000;
981
982         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
983         if (bmcr & BMCR_FULLDPLX) {
984                 bp->duplex = DUPLEX_FULL;
985         }
986         else {
987                 bp->duplex = DUPLEX_HALF;
988         }
989
990         if (!(bmcr & BMCR_ANENABLE)) {
991                 return 0;
992         }
993
994         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
995         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
996
997         common = local_adv & remote_adv;
998         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
999
1000                 if (common & ADVERTISE_1000XFULL) {
1001                         bp->duplex = DUPLEX_FULL;
1002                 }
1003                 else {
1004                         bp->duplex = DUPLEX_HALF;
1005                 }
1006         }
1007
1008         return 0;
1009 }
1010
/* Resolve speed and duplex for a copper PHY.
 *
 * With autoneg enabled, the 1000BASE-T result is checked first
 * (MII_CTRL1000 vs. MII_STAT1000), then the 10/100 advertisement
 * overlap; if nothing matches, the link is marked down.  With autoneg
 * disabled, speed and duplex come straight from BMCR.  Returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two positions above the corresponding advertisement
		 * bits in MII_CTRL1000; shift to align before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to the 10/100
			 * advertisement registers, best ability first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode directly from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1076
/* Program the L2 context type word for one rx ring (context id cid).
 * On 5709 this also sets the rx buffer low/high water marks used for
 * pause-frame generation when tx flow control is enabled.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 is an undocumented field of the context
	 * type word here -- confirm against the chip documentation.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Water marks only matter when we may send pause frames;
		 * otherwise disable the low water mark.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Keep the marks ordered: lo must stay below hi. */
		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale down to the hardware's units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; clamp it, and drop lo_water
		 * if hi_water scaled away to zero.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1112
1113 static void
1114 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1115 {
1116         int i;
1117         u32 cid;
1118
1119         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1120                 if (i == 1)
1121                         cid = RX_RSS_CID;
1122                 bnx2_init_rx_context(bp, cid);
1123         }
1124 }
1125
/* Program the EMAC to match the currently resolved link parameters
 * (port mode, duplex, rx/tx pause) and acknowledge the link-change
 * interrupt.  Returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff are raw TX_LENGTHS values; the
	 * larger value is used for 1000 Mbps half duplex -- presumably
	 * slot-time related.  Confirm against the chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only newer chips have a dedicated
				 * 10 Mbps MII mode; the 5706 uses the
				 * plain MII mode for 10 and 100.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: default to GMII port mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* The 5709 rx water marks depend on flow control; reprogram them. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1195
1196 static void
1197 bnx2_enable_bmsr1(struct bnx2 *bp)
1198 {
1199         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1200             (CHIP_NUM(bp) == CHIP_NUM_5709))
1201                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1202                                MII_BNX2_BLK_ADDR_GP_STATUS);
1203 }
1204
1205 static void
1206 bnx2_disable_bmsr1(struct bnx2 *bp)
1207 {
1208         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1209             (CHIP_NUM(bp) == CHIP_NUM_5709))
1210                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1211                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1212 }
1213
/* Enable 2.5G advertisement in the PHY's UP1 register if the device
 * is 2.5G capable, and add 2500baseX to bp->advertising when speed
 * autoneg is on.
 *
 * Returns nonzero if the advertisement was already enabled (nothing
 * written), 0 if the bit had to be turned on (or the PHY is not 2.5G
 * capable).
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block; restore
	 * the default block when done.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1242
/* Disable 2.5G advertisement in the PHY's UP1 register if the device
 * is 2.5G capable.
 *
 * Returns 1 if the bit was set and had to be cleared (a change was
 * written), 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block; restore
	 * the default block when done.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1268
1269 static void
1270 bnx2_enable_forced_2g5(struct bnx2 *bp)
1271 {
1272         u32 bmcr;
1273
1274         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1275                 return;
1276
1277         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1278                 u32 val;
1279
1280                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1281                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1282                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1283                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1284                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1285                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1286
1287                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1288                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1289                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1290
1291         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1293                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1294         }
1295
1296         if (bp->autoneg & AUTONEG_SPEED) {
1297                 bmcr &= ~BMCR_ANENABLE;
1298                 if (bp->req_duplex == DUPLEX_FULL)
1299                         bmcr |= BMCR_FULLDPLX;
1300         }
1301         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1302 }
1303
1304 static void
1305 bnx2_disable_forced_2g5(struct bnx2 *bp)
1306 {
1307         u32 bmcr;
1308
1309         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1310                 return;
1311
1312         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1313                 u32 val;
1314
1315                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1316                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1317                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1318                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1319                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1323                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1324
1325         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1326                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1327                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1328         }
1329
1330         if (bp->autoneg & AUTONEG_SPEED)
1331                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1332         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1333 }
1334
/* Force the 5706 SerDes link down, or release the force.
 *
 * Works through the expansion SERDES_CTL register accessed via the DSP
 * address/data port.  The caller in bnx2_set_link() passes start == 0
 * when releasing a forced-down condition.
 *
 * NOTE(review): the 0xff0f and 0xc0 masks are undocumented here --
 * confirm their meaning against Broadcom register documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1347
/* Re-evaluate the PHY link state and program the MAC accordingly.
 *
 * Reads the status register, resolves speed/duplex and flow control on
 * link-up, performs link-down cleanup, reports transitions, and
 * reprograms the EMAC.  Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* The link is managed by firmware when a remote PHY is in use. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read the status register twice: link status is latched, so the
	 * second read reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, qualify the PHY link bit with the EMAC link
	 * status and the autoneg debug NOSYNC indication.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release any previously forced-down link first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Read the shadow register twice for a current value. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: back off forced 2.5G and parallel detect so
		 * the next link-up negotiates cleanly.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only report actual transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1431
1432 static int
1433 bnx2_reset_phy(struct bnx2 *bp)
1434 {
1435         int i;
1436         u32 reg;
1437
1438         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1439
1440 #define PHY_RESET_MAX_WAIT 100
1441         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1442                 udelay(10);
1443
1444                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1445                 if (!(reg & BMCR_RESET)) {
1446                         udelay(20);
1447                         break;
1448                 }
1449         }
1450         if (i == PHY_RESET_MAX_WAIT) {
1451                 return -EBUSY;
1452         }
1453         return 0;
1454 }
1455
1456 static u32
1457 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1458 {
1459         u32 adv = 0;
1460
1461         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1462                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1463
1464                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1465                         adv = ADVERTISE_1000XPAUSE;
1466                 }
1467                 else {
1468                         adv = ADVERTISE_PAUSE_CAP;
1469                 }
1470         }
1471         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1472                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1473                         adv = ADVERTISE_1000XPSE_ASYM;
1474                 }
1475                 else {
1476                         adv = ADVERTISE_PAUSE_ASYM;
1477                 }
1478         }
1479         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1480                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1481                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1482                 }
1483                 else {
1484                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1485                 }
1486         }
1487         return adv;
1488 }
1489
1490 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1491
/* Program link settings when the PHY is managed by firmware (remote
 * PHY).  Encodes the requested/advertised speeds, duplex and pause
 * settings into a SET_LINK netlink argument word and hands it to the
 * bootcode via the shared-memory mailbox.  Returns 0.
 *
 * Called with bp->phy_lock held; the lock is dropped around the
 * firmware handshake.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep; drop the PHY lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1548
/* Configure the SerDes PHY according to the requested settings.
 *
 * Handles forced 1G/2.5G speed as well as autonegotiation, forcing a
 * visible link-down transition when the configuration changes so the
 * link partner renegotiates.  Delegates to bnx2_setup_remote_phy()
 * when the PHY is firmware managed.  Returns 0.
 *
 * Called with bp->phy_lock held; the lock is dropped briefly around
 * the msleep() below.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Moving into or out of 2.5G advertisement requires a
		 * link bounce if the setting actually changed.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G mode. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is cleared as a raw
				 * BMCR bit here -- confirm intent against
				 * the register definitions.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve and reprogram. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the PHY lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1663
/* Speeds advertised via ethtool on fibre PHYs; 2.5G is included only
 * when the PHY reports 2.5G capability.  Note: expands using 'bp' from
 * the enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All speeds advertised on copper PHYs (no half-duplex gigabit). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for all 10/100 modes plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for both gigabit duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1678
1679 static void
1680 bnx2_set_default_remote_link(struct bnx2 *bp)
1681 {
1682         u32 link;
1683
1684         if (bp->phy_port == PORT_TP)
1685                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1686         else
1687                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1688
1689         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1690                 bp->req_line_speed = 0;
1691                 bp->autoneg |= AUTONEG_SPEED;
1692                 bp->advertising = ADVERTISED_Autoneg;
1693                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1694                         bp->advertising |= ADVERTISED_10baseT_Half;
1695                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1696                         bp->advertising |= ADVERTISED_10baseT_Full;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1698                         bp->advertising |= ADVERTISED_100baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1700                         bp->advertising |= ADVERTISED_100baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1702                         bp->advertising |= ADVERTISED_1000baseT_Full;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1704                         bp->advertising |= ADVERTISED_2500baseX_Full;
1705         } else {
1706                 bp->autoneg = 0;
1707                 bp->advertising = 0;
1708                 bp->req_duplex = DUPLEX_FULL;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1710                         bp->req_line_speed = SPEED_10;
1711                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1712                                 bp->req_duplex = DUPLEX_HALF;
1713                 }
1714                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1715                         bp->req_line_speed = SPEED_100;
1716                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1717                                 bp->req_duplex = DUPLEX_HALF;
1718                 }
1719                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1720                         bp->req_line_speed = SPEED_1000;
1721                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1722                         bp->req_line_speed = SPEED_2500;
1723         }
1724 }
1725
1726 static void
1727 bnx2_set_default_link(struct bnx2 *bp)
1728 {
1729         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1730                 bnx2_set_default_remote_link(bp);
1731                 return;
1732         }
1733
1734         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1735         bp->req_line_speed = 0;
1736         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1737                 u32 reg;
1738
1739                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1740
1741                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1742                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1743                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1744                         bp->autoneg = 0;
1745                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1746                         bp->req_duplex = DUPLEX_FULL;
1747                 }
1748         } else
1749                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1750 }
1751
1752 static void
1753 bnx2_send_heart_beat(struct bnx2 *bp)
1754 {
1755         u32 msg;
1756         u32 addr;
1757
1758         spin_lock(&bp->indirect_lock);
1759         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1760         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1761         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1762         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1763         spin_unlock(&bp->indirect_lock);
1764 }
1765
/* Handle a link event from the remote (firmware-managed) PHY: refresh
 * link state, speed, duplex, flow control and port type from the
 * BNX2_LINK_STATUS shared-memory word, then reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heart-beat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Half-duplex cases deliberately fall through into the
		 * full-duplex case of the same speed to set line_speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* If either speed or flow control is not autonegotiated,
		 * use the requested flow control; otherwise take the
		 * negotiated TX/RX pause bits from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port type change means the defaults must be reloaded. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1842
1843 static int
1844 bnx2_set_remote_link(struct bnx2 *bp)
1845 {
1846         u32 evt_code;
1847
1848         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1849         switch (evt_code) {
1850                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1851                         bnx2_remote_phy_event(bp);
1852                         break;
1853                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1854                 default:
1855                         bnx2_send_heart_beat(bp);
1856                         break;
1857         }
1858         return 0;
1859 }
1860
/* Configure the copper PHY according to bp->autoneg, bp->advertising,
 * bp->req_line_speed and bp->req_duplex.
 *
 * Called with bp->phy_lock held; the lock is dropped temporarily
 * around msleep() when the link must be forced down first.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement plus pause bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current gigabit advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite the advertisement and restart autoneg if
		 * something actually changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down events; read twice to get the
		 * current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1957
1958 static int
1959 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1960 {
1961         if (bp->loopback == MAC_LOOPBACK)
1962                 return 0;
1963
1964         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1965                 return (bnx2_setup_serdes_phy(bp, port));
1966         }
1967         else {
1968                 return (bnx2_setup_copper_phy(bp));
1969         }
1970 }
1971
/* Initialize the 5709 SerDes PHY.  Most registers are accessed through
 * block-addressed shadow pages selected via MII_BNX2_BLK_ADDR, and the
 * standard MII registers sit at an offset of +0x10.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; disable media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable 2.5G advertisement only if the PHY is capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page bits in the next-page control. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2021
/* Initialize the 5708 SerDes PHY (BCM5708S).  Always returns 0. */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Select IEEE register mode via the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection of the link partner's PLL. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G if the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from the shared-memory
	 * hardware config, but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2079
/* Initialize the 5706 SerDes PHY.  Always returns 0.
 *
 * NOTE(review): registers 0x18 and 0x1c are vendor-specific shadow
 * registers; the magic values come from Broadcom and should not be
 * changed without the PHY documentation.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2117
/* Initialize the copper (BASE-T) PHY.  Always returns 0.
 *
 * NOTE(review): registers 0x10, 0x15, 0x17, 0x18 are vendor-specific
 * shadow/expansion registers; the magic values come from Broadcom.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Apply the CRC fix register sequence when the flag requires it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC by clearing bit 8 of the DSP expansion reg. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2169
2170
2171 static int
2172 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2173 {
2174         u32 val;
2175         int rc = 0;
2176
2177         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2178         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2179
2180         bp->mii_bmcr = MII_BMCR;
2181         bp->mii_bmsr = MII_BMSR;
2182         bp->mii_bmsr1 = MII_BMSR;
2183         bp->mii_adv = MII_ADVERTISE;
2184         bp->mii_lpa = MII_LPA;
2185
2186         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2187
2188         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2189                 goto setup_phy;
2190
2191         bnx2_read_phy(bp, MII_PHYSID1, &val);
2192         bp->phy_id = val << 16;
2193         bnx2_read_phy(bp, MII_PHYSID2, &val);
2194         bp->phy_id |= val & 0xffff;
2195
2196         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2197                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2198                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2199                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2200                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2201                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2202                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2203         }
2204         else {
2205                 rc = bnx2_init_copper_phy(bp, reset_phy);
2206         }
2207
2208 setup_phy:
2209         if (!rc)
2210                 rc = bnx2_setup_phy(bp, bp->phy_port);
2211
2212         return rc;
2213 }
2214
2215 static int
2216 bnx2_set_mac_loopback(struct bnx2 *bp)
2217 {
2218         u32 mac_mode;
2219
2220         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2221         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2222         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2223         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2224         bp->link_up = 1;
2225         return 0;
2226 }
2227
2228 static int bnx2_test_link(struct bnx2 *);
2229
2230 static int
2231 bnx2_set_phy_loopback(struct bnx2 *bp)
2232 {
2233         u32 mac_mode;
2234         int rc, i;
2235
2236         spin_lock_bh(&bp->phy_lock);
2237         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2238                             BMCR_SPEED1000);
2239         spin_unlock_bh(&bp->phy_lock);
2240         if (rc)
2241                 return rc;
2242
2243         for (i = 0; i < 10; i++) {
2244                 if (bnx2_test_link(bp) == 0)
2245                         break;
2246                 msleep(100);
2247         }
2248
2249         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2250         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2251                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2252                       BNX2_EMAC_MODE_25G_MODE);
2253
2254         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2255         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2256         bp->link_up = 1;
2257         return 0;
2258 }
2259
/* Post a message to the firmware's driver mailbox and poll for its
 * acknowledgement.
 *
 * @msg_data: message code/data; the incremented driver sequence number
 *	is ORed in before posting.
 * @silent: when non-zero, suppress the error printk on timeout.
 *
 * Returns 0 on success, -EBUSY if the firmware did not ack within
 * FW_ACK_TIME_OUT_MS (the timeout is also reported back to the
 * firmware), or -EIO if the firmware acked with a non-OK status.
 * WAIT0-type messages return 0 immediately after the ack poll without
 * checking timeout or status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware echoes the sequence number when it is done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2302
/* Program the 5709's host page table with the driver's context memory
 * pages and wait for the chip to finish context memory init.
 *
 * Returns 0 on success, -EBUSY if memory init or a page table write
 * does not complete in time, -ENOMEM if a context page was never
 * allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Kick off context memory init; the page size (as a power of
	 * two relative to 256 bytes) is encoded in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages must have been allocated earlier; zero them. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write one page table entry: low dword, high dword,
		 * then the control word that triggers the write, and
		 * poll for the write-request bit to clear.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2350
/* Zero the on-chip context memory for all 96 CIDs (non-5709 chips,
 * which keep their context on-chip).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps some virtual CIDs to different
			 * physical CIDs.  NOTE(review): presumably an A0
			 * errata workaround -- verify against the errata
			 * document.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the context page into the window... */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2393
2394 static int
2395 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2396 {
2397         u16 *good_mbuf;
2398         u32 good_mbuf_cnt;
2399         u32 val;
2400
2401         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2402         if (good_mbuf == NULL) {
2403                 printk(KERN_ERR PFX "Failed to allocate memory in "
2404                                     "bnx2_alloc_bad_rbuf\n");
2405                 return -ENOMEM;
2406         }
2407
2408         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2409                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2410
2411         good_mbuf_cnt = 0;
2412
2413         /* Allocate a bunch of mbufs and save the good ones in an array. */
2414         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2415         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2416                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2417                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2418
2419                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2420
2421                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2422
2423                 /* The addresses with Bit 9 set are bad memory blocks. */
2424                 if (!(val & (1 << 9))) {
2425                         good_mbuf[good_mbuf_cnt] = (u16) val;
2426                         good_mbuf_cnt++;
2427                 }
2428
2429                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2430         }
2431
2432         /* Free the good ones back to the mbuf pool thus discarding
2433          * all the bad ones. */
2434         while (good_mbuf_cnt) {
2435                 good_mbuf_cnt--;
2436
2437                 val = good_mbuf[good_mbuf_cnt];
2438                 val = (val << 9) | val | 1;
2439
2440                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2441         }
2442         kfree(good_mbuf);
2443         return 0;
2444 }
2445
2446 static void
2447 bnx2_set_mac_addr(struct bnx2 *bp)
2448 {
2449         u32 val;
2450         u8 *mac_addr = bp->dev->dev_addr;
2451
2452         val = (mac_addr[0] << 8) | mac_addr[1];
2453
2454         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2455
2456         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2457                 (mac_addr[4] << 8) | mac_addr[5];
2458
2459         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2460 }
2461
2462 static inline int
2463 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2464 {
2465         dma_addr_t mapping;
2466         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2467         struct rx_bd *rxbd =
2468                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2469         struct page *page = alloc_page(GFP_ATOMIC);
2470
2471         if (!page)
2472                 return -ENOMEM;
2473         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2474                                PCI_DMA_FROMDEVICE);
2475         rx_pg->page = page;
2476         pci_unmap_addr_set(rx_pg, mapping, mapping);
2477         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2478         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2479         return 0;
2480 }
2481
2482 static void
2483 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2484 {
2485         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2486         struct page *page = rx_pg->page;
2487
2488         if (!page)
2489                 return;
2490
2491         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2492                        PCI_DMA_FROMDEVICE);
2493
2494         __free_page(page);
2495         rx_pg->page = NULL;
2496 }
2497
/* Allocate and DMA-map a new skb for rx ring slot "index", publish the
 * mapping in the matching buffer descriptor, and advance the producer
 * byte sequence.  Returns 0 on success or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Account for the newly posted buffer in the byte sequence the
	 * chip uses to track available rx space.
	 */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2528
2529 static int
2530 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2531 {
2532         struct status_block *sblk = bnapi->status_blk.msi;
2533         u32 new_link_state, old_link_state;
2534         int is_set = 1;
2535
2536         new_link_state = sblk->status_attn_bits & event;
2537         old_link_state = sblk->status_attn_bits_ack & event;
2538         if (new_link_state != old_link_state) {
2539                 if (new_link_state)
2540                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2541                 else
2542                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2543         } else
2544                 is_set = 0;
2545
2546         return is_set;
2547 }
2548
2549 static void
2550 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2551 {
2552         spin_lock(&bp->phy_lock);
2553
2554         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2555                 bnx2_set_link(bp);
2556         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2557                 bnx2_set_remote_link(bp);
2558
2559         spin_unlock(&bp->phy_lock);
2560
2561 }
2562
2563 static inline u16
2564 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2565 {
2566         u16 cons;
2567
2568         /* Tell compiler that status block fields can change. */
2569         barrier();
2570         cons = *bnapi->hw_tx_cons_ptr;
2571         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2572                 cons++;
2573         return cons;
2574 }
2575
/* Reclaim completed tx descriptors, up to "budget" packets.  Each
 * completed skb is unmapped and freed, the software consumer index is
 * advanced, and the tx queue is woken if enough space was freed.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only free the skb once all of its BDs (head +
			 * one per fragment) have been completed by hw.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				/* The span crosses a ring page boundary;
				 * account for the extra slot.
				 */
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap every fragment BD of this skb. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up any completions that arrived while reclaiming. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to close the race with the
		 * transmit path stopping the queue concurrently.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2658
/* Recycle "count" entries of the rx page ring from the consumer end
 * back to the producer end.  If "skb" is non-NULL, its last page
 * fragment is taken back, re-mapped for DMA and recycled too, and the
 * skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = rxr->rx_pg_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the skb's last page fragment back for the
			 * ring and re-establish its DMA mapping.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page, mapping and BD address from the
			 * consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2708
/* Recycle the rx skb at consumer slot "cons" into producer slot "prod"
 * without reallocating — used when the packet was copied out, errored,
 * or must be dropped.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the partially CPU-synced region back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor already point at this skb. */
	if (cons == prod)
		return;

	/* Carry the DMA mapping and BD address to the producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2738
/* Finish rx processing for one skb: replenish its ring slot, unmap the
 * skb, and for split/jumbo packets attach the page-ring fragments.
 * "ring_idx" packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.  Returns 0 on success, or a
 * negative errno when replenishing failed (the buffers are recycled
 * and the caller drops the packet).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill: recycle the old skb (and any page
		 * fragments) and report the failure.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* +4 restores the trailing frame CRC stripped by
			 * the caller, so the page count matches what was
			 * consumed.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Non-split packet: everything is in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size includes the 4-byte frame CRC, which lands in
		 * the last fragment and is trimmed off below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* This fragment holds nothing but (part of)
				 * the CRC: trim the excess from the skb and
				 * recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Trim the CRC from the final fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Refill failed: recycle the remaining pages
				 * (including the one just attached to the
				 * skb) and drop the packet.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2831
2832 static inline u16
2833 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2834 {
2835         u16 cons;
2836
2837         /* Tell compiler that status block fields can change. */
2838         barrier();
2839         cons = *bnapi->hw_rx_cons_ptr;
2840         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2841                 cons++;
2842         return cons;
2843 }
2844
/* NAPI rx handler: process up to "budget" received packets from the rx
 * ring, posting replacement buffers as packets are consumed, then tell
 * the chip the new producer indices.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; large
		 * packets are fully unmapped later in bnx2_rx_skb().
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr status header at the start
		 * of the buffer, before the packet data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Errored frame: recycle the buffer and drop it. */
			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split: the linear header length is
			 * reported in the ip_xsum field for split frames.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			/* Jumbo frame: linear part capped at the threshold,
			 * the rest lives in page-ring fragments.
			 */
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the trailing 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original rx buffer in place.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop frames longer than the MTU unless they carry a
		 * VLAN tag (ethertype 0x8100).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum only when no
			 * checksum errors were flagged.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2992
2993 /* MSI ISR - The only difference between this and the INTx ISR
2994  * is that the MSI interrupt is always serviced.
2995  */
2996 static irqreturn_t
2997 bnx2_msi(int irq, void *dev_instance)
2998 {
2999         struct net_device *dev = dev_instance;
3000         struct bnx2 *bp = netdev_priv(dev);
3001         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3002
3003         prefetch(bnapi->status_blk.msi);
3004         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3005                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3006                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3007
3008         /* Return here if interrupt is disabled. */
3009         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3010                 return IRQ_HANDLED;
3011
3012         netif_rx_schedule(dev, &bnapi->napi);
3013
3014         return IRQ_HANDLED;
3015 }
3016
3017 static irqreturn_t
3018 bnx2_msi_1shot(int irq, void *dev_instance)
3019 {
3020         struct net_device *dev = dev_instance;
3021         struct bnx2 *bp = netdev_priv(dev);
3022         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3023
3024         prefetch(bnapi->status_blk.msi);
3025
3026         /* Return here if interrupt is disabled. */
3027         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3028                 return IRQ_HANDLED;
3029
3030         netif_rx_schedule(dev, &bnapi->napi);
3031
3032         return IRQ_HANDLED;
3033 }
3034
/* Shared INTx interrupt handler (also used as the MSI fallback path):
 * filters out interrupts that are not ours, acks/masks the chip, and
 * schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask the interrupt; NAPI poll re-enables it. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if we actually win scheduling. */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3074
3075 static irqreturn_t
3076 bnx2_tx_msix(int irq, void *dev_instance)
3077 {
3078         struct net_device *dev = dev_instance;
3079         struct bnx2 *bp = netdev_priv(dev);
3080         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
3081
3082         prefetch(bnapi->status_blk.msix);
3083
3084         /* Return here if interrupt is disabled. */
3085         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3086                 return IRQ_HANDLED;
3087
3088         netif_rx_schedule(dev, &bnapi->napi);
3089         return IRQ_HANDLED;
3090 }
3091
3092 static inline int
3093 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3094 {
3095         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3096         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3097
3098         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3099             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3100                 return 1;
3101         return 0;
3102 }
3103
/* Slow-path attention events handled by the poll loop: link state
 * changes and firmware timer aborts.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
3106
3107 static inline int
3108 bnx2_has_work(struct bnx2_napi *bnapi)
3109 {
3110         struct status_block *sblk = bnapi->status_blk.msi;
3111
3112         if (bnx2_has_fast_work(bnapi))
3113                 return 1;
3114
3115         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3116             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3117                 return 1;
3118
3119         return 0;
3120 }
3121
/* NAPI poll handler for the dedicated tx MSI-X vector: reclaims tx
 * completions until the ring is caught up or the budget is exhausted,
 * then exits polling and re-arms the vector.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	do {
		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
		if (unlikely(work_done >= budget))
			return work_done;

		/* Capture the status index before re-checking for work so
		 * the ack below reflects everything we have processed.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
	} while (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons);

	/* All caught up: stop polling and re-enable this vector. */
	netif_rx_complete(bp->dev, napi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       bnapi->last_status_idx);
	return work_done;
}
3145
/* Handle pending link/timer attention events from the poll loop. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Raw and ack bits differing means an unhandled event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3165
3166 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3167                           int work_done, int budget)
3168 {
3169         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3170         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3171
3172         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3173                 bnx2_tx_int(bp, bnapi, 0);
3174
3175         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3176                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3177
3178         return work_done;
3179 }
3180
/* Main NAPI poll handler (INTx/MSI): loops handling link events and
 * tx/rx work until either the budget is spent or no work remains, then
 * exits polling and re-enables the interrupt.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx path: first ack with the mask bit still set,
			 * then a second write unmasks the interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3224
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Rebuild the rx mode from scratch (minus the bits set below). */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags on received frames only when no vlan group is
	 * registered and ASF firmware does not need stripping.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit of the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each list entry into the 256-bit filter: low CRC
		 * byte selects register (top 3 bits) and bit (low 5 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only write the rx mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the USER0 sort rules. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3299
/* Load one RV2P processor firmware image into the chip.
 *
 * @rv2p_code:     firmware instruction stream (little-endian dwords)
 * @rv2p_code_len: length of the stream in bytes
 * @rv2p_proc:     RV2P_PROC1 or RV2P_PROC2, selecting the target engine
 *
 * The processor is left in reset when this returns; it is un-stalled
 * later in the init sequence.
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	/* On the 5709, patch PROC2's max-BD-per-page instruction in the
	 * image so its BD page size field matches this driver's setting. */
	if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
		val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
		val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
		rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
	}

	/* Each RV2P instruction is 8 bytes (two dwords).  Stage the
	 * high and low halves, then commit them to instruction RAM slot
	 * i/8 by writing the engine's ADDR_CMD register. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3338
/* Load one firmware image onto an on-chip MIPS CPU and start it.
 *
 * @cpu_reg: register map of the target CPU (mode/state/pc/etc.)
 * @fw:      firmware image; gz_text (if set) is inflated into fw->text
 *           before being written to the chip
 *
 * The CPU is halted, each section (text/data/sbss/bss/rodata) is copied
 * to its scratchpad address via indirect register writes, the program
 * counter is set, and the CPU is released from halt.
 *
 * Returns 0 on success or a negative error from zlib_inflate_blob().
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.  Section addresses in the image are MIPS
	 * virtual addresses; rebase them onto the CPU's scratchpad. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Text ships gzip-compressed; inflate into fw->text first. */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		/* Text words are little-endian in the inflated image. */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU: clear state, then drop the halt bit. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3420
3421 static int
3422 bnx2_init_cpus(struct bnx2 *bp)
3423 {
3424         struct fw_info *fw;
3425         int rc, rv2p_len;
3426         void *text, *rv2p;
3427
3428         /* Initialize the RV2P processor. */
3429         text = vmalloc(FW_BUF_SIZE);
3430         if (!text)
3431                 return -ENOMEM;
3432         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3433                 rv2p = bnx2_xi_rv2p_proc1;
3434                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3435         } else {
3436                 rv2p = bnx2_rv2p_proc1;
3437                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3438         }
3439         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3440         if (rc < 0)
3441                 goto init_cpu_err;
3442
3443         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3444
3445         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3446                 rv2p = bnx2_xi_rv2p_proc2;
3447                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3448         } else {
3449                 rv2p = bnx2_rv2p_proc2;
3450                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3451         }
3452         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3453         if (rc < 0)
3454                 goto init_cpu_err;
3455
3456         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3457
3458         /* Initialize the RX Processor. */
3459         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3460                 fw = &bnx2_rxp_fw_09;
3461         else
3462                 fw = &bnx2_rxp_fw_06;
3463
3464         fw->text = text;
3465         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3466         if (rc)
3467                 goto init_cpu_err;
3468
3469         /* Initialize the TX Processor. */
3470         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3471                 fw = &bnx2_txp_fw_09;
3472         else
3473                 fw = &bnx2_txp_fw_06;
3474
3475         fw->text = text;
3476         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3477         if (rc)
3478                 goto init_cpu_err;
3479
3480         /* Initialize the TX Patch-up Processor. */
3481         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3482                 fw = &bnx2_tpat_fw_09;
3483         else
3484                 fw = &bnx2_tpat_fw_06;
3485
3486         fw->text = text;
3487         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3488         if (rc)
3489                 goto init_cpu_err;
3490
3491         /* Initialize the Completion Processor. */
3492         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3493                 fw = &bnx2_com_fw_09;
3494         else
3495                 fw = &bnx2_com_fw_06;
3496
3497         fw->text = text;
3498         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3499         if (rc)
3500                 goto init_cpu_err;
3501
3502         /* Initialize the Command Processor. */
3503         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3504                 fw = &bnx2_cp_fw_09;
3505         else
3506                 fw = &bnx2_cp_fw_06;
3507
3508         fw->text = text;
3509         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3510
3511 init_cpu_err:
3512         vfree(text);
3513         return rc;
3514 }
3515
/* Transition the device between PCI power states D0 and D3hot.
 *
 * For D0: clear any pending PME status, wait out the D3hot->D0
 * transition delay, and disable the magic-packet/ACPI wake modes in
 * the EMAC and RPM blocks.
 *
 * For D3hot: if Wake-on-LAN is enabled, reprogram the PHY/MAC for
 * low-power wake operation (magic packet + broadcast/multicast
 * sorting), notify the firmware, then write the D3hot state (and PME
 * enable when waking is armed) into PMCSR.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Go to D0 and clear the (write-1-to-clear) PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Acknowledge any wake events and turn off magic-packet
		 * detection now that we are fully powered. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper to
			 * reduce power while suspended; restore the
			 * user's settings afterwards. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: accept broadcast and multicast while
			 * asleep; toggle ENA to latch the new rule. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the D3hot state bits (3) are
		 * only set when WOL is armed — presumably an early-silicon
		 * errata workaround keeping those chips in D0 otherwise;
		 * confirm against the errata list before changing. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3652
3653 static int
3654 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3655 {
3656         u32 val;
3657         int j;
3658
3659         /* Request access to the flash interface. */
3660         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3661         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3662                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3663                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3664                         break;
3665
3666                 udelay(5);
3667         }
3668
3669         if (j >= NVRAM_TIMEOUT_COUNT)
3670                 return -EBUSY;
3671
3672         return 0;
3673 }
3674
3675 static int
3676 bnx2_release_nvram_lock(struct bnx2 *bp)
3677 {
3678         int j;
3679         u32 val;
3680
3681         /* Relinquish nvram interface. */
3682         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3683
3684         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3685                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3686                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3687                         break;
3688
3689                 udelay(5);
3690         }
3691
3692         if (j >= NVRAM_TIMEOUT_COUNT)
3693                 return -EBUSY;
3694
3695         return 0;
3696 }
3697
3698
3699 static int
3700 bnx2_enable_nvram_write(struct bnx2 *bp)
3701 {
3702         u32 val;
3703
3704         val = REG_RD(bp, BNX2_MISC_CFG);
3705         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3706
3707         if (bp->flash_info->flags & BNX2_NV_WREN) {
3708                 int j;
3709
3710                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3711                 REG_WR(bp, BNX2_NVM_COMMAND,
3712                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3713
3714                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3715                         udelay(5);
3716
3717                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3718                         if (val & BNX2_NVM_COMMAND_DONE)
3719                                 break;
3720                 }
3721
3722                 if (j >= NVRAM_TIMEOUT_COUNT)
3723                         return -EBUSY;
3724         }
3725         return 0;
3726 }
3727
3728 static void
3729 bnx2_disable_nvram_write(struct bnx2 *bp)
3730 {
3731         u32 val;
3732
3733         val = REG_RD(bp, BNX2_MISC_CFG);
3734         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3735 }
3736
3737
3738 static void
3739 bnx2_enable_nvram_access(struct bnx2 *bp)
3740 {
3741         u32 val;
3742
3743         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3744         /* Enable both bits, even on read. */
3745         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3746                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3747 }
3748
3749 static void
3750 bnx2_disable_nvram_access(struct bnx2 *bp)
3751 {
3752         u32 val;
3753
3754         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3755         /* Disable both bits, even after read. */
3756         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3757                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3758                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3759 }
3760
3761 static int
3762 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3763 {
3764         u32 cmd;
3765         int j;
3766
3767         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3768                 /* Buffered flash, no erase needed */
3769                 return 0;
3770
3771         /* Build an erase command */
3772         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3773               BNX2_NVM_COMMAND_DOIT;
3774
3775         /* Need to clear DONE bit separately. */
3776         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3777
3778         /* Address of the NVRAM to read from. */
3779         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3780
3781         /* Issue an erase command. */
3782         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3783
3784         /* Wait for completion. */
3785         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3786                 u32 val;
3787
3788                 udelay(5);
3789
3790                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3791                 if (val & BNX2_NVM_COMMAND_DONE)
3792                         break;
3793         }
3794
3795         if (j >= NVRAM_TIMEOUT_COUNT)
3796                 return -EBUSY;
3797
3798         return 0;
3799 }
3800
3801 static int
3802 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3803 {
3804         u32 cmd;
3805         int j;
3806
3807         /* Build the command word. */
3808         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3809
3810         /* Calculate an offset of a buffered flash, not needed for 5709. */
3811         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3812                 offset = ((offset / bp->flash_info->page_size) <<
3813                            bp->flash_info->page_bits) +
3814                           (offset % bp->flash_info->page_size);
3815         }
3816
3817         /* Need to clear DONE bit separately. */
3818         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3819
3820         /* Address of the NVRAM to read from. */
3821         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3822
3823         /* Issue a read command. */
3824         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3825
3826         /* Wait for completion. */
3827         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3828                 u32 val;
3829
3830                 udelay(5);
3831
3832                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3833                 if (val & BNX2_NVM_COMMAND_DONE) {
3834                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3835                         memcpy(ret_val, &v, 4);
3836                         break;
3837                 }
3838         }
3839         if (j >= NVRAM_TIMEOUT_COUNT)
3840                 return -EBUSY;
3841
3842         return 0;
3843 }
3844
3845
3846 static int
3847 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3848 {
3849         u32 cmd;
3850         __be32 val32;
3851         int j;
3852
3853         /* Build the command word. */
3854         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3855
3856         /* Calculate an offset of a buffered flash, not needed for 5709. */
3857         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3858                 offset = ((offset / bp->flash_info->page_size) <<
3859                           bp->flash_info->page_bits) +
3860                          (offset % bp->flash_info->page_size);
3861         }
3862
3863         /* Need to clear DONE bit separately. */
3864         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3865
3866         memcpy(&val32, val, 4);
3867
3868         /* Write the data. */
3869         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3870
3871         /* Address of the NVRAM to write to. */
3872         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3873
3874         /* Issue the write command. */
3875         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3876
3877         /* Wait for completion. */
3878         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3879                 udelay(5);
3880
3881                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3882                         break;
3883         }
3884         if (j >= NVRAM_TIMEOUT_COUNT)
3885                 return -EBUSY;
3886
3887         return 0;
3888 }
3889
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVM interface if the strapping has
 * not been applied yet.  Also determines bp->flash_size.  Returns 0 on
 * success, -ENODEV if no table entry matches, or an error from the
 * NVRAM lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has one fixed flash type; only the size needs probing. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above fall through with j == entry_count when no
	 * table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared firmware config; fall back
	 * to the table's total_size. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3972
3973 static int
3974 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3975                 int buf_size)
3976 {
3977         int rc = 0;
3978         u32 cmd_flags, offset32, len32, extra;
3979
3980         if (buf_size == 0)
3981                 return 0;
3982
3983         /* Request access to the flash interface. */
3984         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3985                 return rc;
3986
3987         /* Enable access to flash interface */
3988         bnx2_enable_nvram_access(bp);
3989
3990         len32 = buf_size;
3991         offset32 = offset;
3992         extra = 0;
3993
3994         cmd_flags = 0;
3995
3996         if (offset32 & 3) {
3997                 u8 buf[4];
3998                 u32 pre_len;
3999
4000                 offset32 &= ~3;
4001                 pre_len = 4 - (offset & 3);
4002
4003                 if (pre_len >= len32) {
4004                         pre_len = len32;
4005                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4006                                     BNX2_NVM_COMMAND_LAST;
4007                 }
4008                 else {
4009                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4010                 }
4011
4012                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4013
4014                 if (rc)
4015                         return rc;
4016
4017                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4018
4019                 offset32 += 4;
4020                 ret_buf += pre_len;
4021                 len32 -= pre_len;
4022         }
4023         if (len32 & 3) {
4024                 extra = 4 - (len32 & 3);
4025                 len32 = (len32 + 4) & ~3;
4026         }
4027
4028         if (len32 == 4) {
4029                 u8 buf[4];
4030
4031                 if (cmd_flags)
4032                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4033                 else
4034                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4035                                     BNX2_NVM_COMMAND_LAST;
4036
4037                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4038
4039                 memcpy(ret_buf, buf, 4 - extra);
4040         }
4041         else if (len32 > 0) {
4042                 u8 buf[4];
4043
4044                 /* Read the first word. */
4045                 if (cmd_flags)
4046                         cmd_flags = 0;
4047                 else
4048                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4049
4050                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4051
4052                 /* Advance to the next dword. */
4053                 offset32 += 4;
4054                 ret_buf += 4;
4055                 len32 -= 4;
4056
4057                 while (len32 > 4 && rc == 0) {
4058                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4059
4060                         /* Advance to the next dword. */
4061                         offset32 += 4;
4062                         ret_buf += 4;
4063                         len32 -= 4;
4064                 }
4065
4066                 if (rc)
4067                         return rc;
4068
4069                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4070                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4071
4072                 memcpy(ret_buf, buf, 4 - extra);
4073         }
4074
4075         /* Disable access to flash interface */
4076         bnx2_disable_nvram_access(bp);
4077
4078         bnx2_release_nvram_lock(bp);
4079
4080         return rc;
4081 }
4082
4083 static int
4084 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4085                 int buf_size)
4086 {
4087         u32 written, offset32, len32;
4088         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4089         int rc = 0;
4090         int align_start, align_end;
4091
4092         buf = data_buf;
4093         offset32 = offset;
4094         len32 = buf_size;
4095         align_start = align_end = 0;
4096
4097         if ((align_start = (offset32 & 3))) {
4098                 offset32 &= ~3;
4099                 len32 += align_start;
4100                 if (len32 < 4)
4101                         len32 = 4;
4102                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4103                         return rc;
4104         }
4105
4106         if (len32 & 3) {
4107                 align_end = 4 - (len32 & 3);
4108                 len32 += align_end;
4109                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4110                         return rc;
4111         }
4112
4113         if (align_start || align_end) {
4114                 align_buf = kmalloc(len32, GFP_KERNEL);
4115                 if (align_buf == NULL)
4116                         return -ENOMEM;
4117                 if (align_start) {
4118                         memcpy(align_buf, start, 4);
4119                 }
4120                 if (align_end) {
4121                         memcpy(align_buf + len32 - 4, end, 4);
4122                 }
4123                 memcpy(align_buf + align_start, data_buf, buf_size);
4124                 buf = align_buf;
4125         }
4126
4127         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4128                 flash_buffer = kmalloc(264, GFP_KERNEL);
4129                 if (flash_buffer == NULL) {
4130                         rc = -ENOMEM;
4131                         goto nvram_write_end;
4132                 }
4133         }
4134
4135         written = 0;
4136         while ((written < len32) && (rc == 0)) {
4137                 u32 page_start, page_end, data_start, data_end;
4138                 u32 addr, cmd_flags;
4139                 int i;
4140
4141                 /* Find the page_start addr */
4142                 page_start = offset32 + written;
4143                 page_start -= (page_start % bp->flash_info->page_size);
4144                 /* Find the page_end addr */
4145                 page_end = page_start + bp->flash_info->page_size;
4146                 /* Find the data_start addr */
4147                 data_start = (written == 0) ? offset32 : page_start;
4148                 /* Find the data_end addr */
4149                 data_end = (page_end > offset32 + len32) ?
4150                         (offset32 + len32) : page_end;
4151
4152                 /* Request access to the flash interface. */
4153                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4154                         goto nvram_write_end;
4155
4156                 /* Enable access to flash interface */
4157                 bnx2_enable_nvram_access(bp);
4158
4159                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4160                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4161                         int j;
4162
4163                         /* Read the whole page into the buffer
4164                          * (non-buffer flash only) */
4165                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4166                                 if (j == (bp->flash_info->page_size - 4)) {
4167                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4168                                 }
4169                                 rc = bnx2_nvram_read_dword(bp,
4170                                         page_start + j,
4171                                         &flash_buffer[j],
4172                                         cmd_flags);
4173
4174                                 if (rc)
4175                                         goto nvram_write_end;
4176
4177                                 cmd_flags = 0;
4178                         }
4179                 }
4180
4181                 /* Enable writes to flash interface (unlock write-protect) */
4182                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4183                         goto nvram_write_end;
4184
4185                 /* Loop to write back the buffer data from page_start to
4186                  * data_start */
4187                 i = 0;
4188                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4189                         /* Erase the page */
4190                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4191                                 goto nvram_write_end;
4192
4193                         /* Re-enable the write again for the actual write */
4194                         bnx2_enable_nvram_write(bp);
4195
4196                         for (addr = page_start; addr < data_start;
4197                                 addr += 4, i += 4) {
4198
4199                                 rc = bnx2_nvram_write_dword(bp, addr,
4200                                         &flash_buffer[i], cmd_flags);
4201
4202                                 if (rc != 0)
4203                                         goto nvram_write_end;
4204
4205                                 cmd_flags = 0;
4206                         }
4207                 }
4208
4209                 /* Loop to write the new data from data_start to data_end */
4210                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4211                         if ((addr == page_end - 4) ||
4212                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4213                                  (addr == data_end - 4))) {
4214
4215                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4216                         }
4217                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4218                                 cmd_flags);
4219
4220                         if (rc != 0)
4221                                 goto nvram_write_end;
4222
4223                         cmd_flags = 0;
4224                         buf += 4;
4225                 }
4226
4227                 /* Loop to write back the buffer data from data_end
4228                  * to page_end */
4229                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4230                         for (addr = data_end; addr < page_end;
4231                                 addr += 4, i += 4) {
4232
4233                                 if (addr == page_end-4) {
4234                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4235                                 }
4236                                 rc = bnx2_nvram_write_dword(bp, addr,
4237                                         &flash_buffer[i], cmd_flags);
4238
4239                                 if (rc != 0)
4240                                         goto nvram_write_end;
4241
4242                                 cmd_flags = 0;
4243                         }
4244                 }
4245
4246                 /* Disable writes to flash interface (lock write-protect) */
4247                 bnx2_disable_nvram_write(bp);
4248
4249                 /* Disable access to flash interface */
4250                 bnx2_disable_nvram_access(bp);
4251                 bnx2_release_nvram_lock(bp);
4252
4253                 /* Increment written */
4254                 written += data_end - data_start;
4255         }
4256
4257 nvram_write_end:
4258         kfree(flash_buffer);
4259         kfree(align_buf);
4260         return rc;
4261 }
4262
4263 static void
4264 bnx2_init_remote_phy(struct bnx2 *bp)
4265 {
4266         u32 val;
4267
4268         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4269         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4270                 return;
4271
4272         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4273         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4274                 return;
4275
4276         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4277                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4278
4279                 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4280                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4281                         bp->phy_port = PORT_FIBRE;
4282                 else
4283                         bp->phy_port = PORT_TP;
4284
4285                 if (netif_running(bp->dev)) {
4286                         u32 sig;
4287
4288                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4289                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4290                         bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4291                 }
4292         }
4293 }
4294
/* Program separate GRC windows mapping the MSI-X vector table and the
 * MSI-X pending-bit array (PBA) into the register space.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4303
/* Perform a coordinated soft reset of the chip.  reset_code is the
 * BNX2_DRV_MSG_CODE_* reason passed to the firmware before and after
 * the reset.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via a dedicated command register. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through PCI config space. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	/* Reapply default remote link settings if the port type changed
	 * across the reset. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Re-program the MSI-X GRC windows after the reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4409
/* Bring the chip into an operational state after reset: configure DMA,
 * context memory, internal CPUs, the mailbox queue, host coalescing,
 * and the rx filter, then notify the firmware that initialization is
 * complete.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the enable-relaxed-ordering bit in PCI-X command. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Initialize the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: use a 256-byte kernel bypass block size. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the tx backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Restart status block tracking for every vector. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds and timers; the interrupt-mode value
	 * goes in the upper 16 bits of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Configure the status block used by the tx MSI-X vector. */
		u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4610
4611 static void
4612 bnx2_clear_ring_states(struct bnx2 *bp)
4613 {
4614         struct bnx2_napi *bnapi;
4615         struct bnx2_tx_ring_info *txr;
4616         struct bnx2_rx_ring_info *rxr;
4617         int i;
4618
4619         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4620                 bnapi = &bp->bnx2_napi[i];
4621                 txr = &bnapi->tx_ring;
4622                 rxr = &bnapi->rx_ring;
4623
4624                 txr->tx_cons = 0;
4625                 txr->hw_tx_cons = 0;
4626                 rxr->rx_prod_bseq = 0;
4627                 rxr->rx_prod = 0;
4628                 rxr->rx_cons = 0;
4629                 rxr->rx_pg_prod = 0;
4630                 rxr->rx_pg_cons = 0;
4631         }
4632 }
4633
4634 static void
4635 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4636 {
4637         u32 val, offset0, offset1, offset2, offset3;
4638         u32 cid_addr = GET_CID_ADDR(cid);
4639
4640         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4641                 offset0 = BNX2_L2CTX_TYPE_XI;
4642                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4643                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4644                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4645         } else {
4646                 offset0 = BNX2_L2CTX_TYPE;
4647                 offset1 = BNX2_L2CTX_CMD_TYPE;
4648                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4649                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4650         }
4651         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4652         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4653
4654         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4655         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4656
4657         val = (u64) txr->tx_desc_mapping >> 32;
4658         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4659
4660         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4661         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4662 }
4663
4664 static void
4665 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4666 {
4667         struct tx_bd *txbd;
4668         u32 cid = TX_CID;
4669         struct bnx2_napi *bnapi;
4670         struct bnx2_tx_ring_info *txr;
4671
4672         bnapi = &bp->bnx2_napi[ring_num];
4673         txr = &bnapi->tx_ring;
4674
4675         if (ring_num == 0)
4676                 cid = TX_CID;
4677         else
4678                 cid = TX_TSS_CID + ring_num - 1;
4679
4680         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4681
4682         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4683
4684         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4685         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4686
4687         txr->tx_prod = 0;
4688         txr->tx_prod_bseq = 0;
4689
4690         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4691         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4692
4693         bnx2_init_tx_context(bp, cid, txr);
4694 }
4695
4696 static void
4697 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4698                      int num_rings)
4699 {
4700         int i;
4701         struct rx_bd *rxbd;
4702
4703         for (i = 0; i < num_rings; i++) {
4704                 int j;
4705
4706                 rxbd = &rx_ring[i][0];
4707                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4708                         rxbd->rx_bd_len = buf_size;
4709                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4710                 }
4711                 if (i == (num_rings - 1))
4712                         j = 0;
4713                 else
4714                         j = i + 1;
4715                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4716                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4717         }
4718 }
4719
/* Initialize one rx ring (and its optional page ring used for jumbo
 * frames): build the BD chains, program the rx context, pre-fill the
 * rings with buffers, and publish the initial producer indices.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; extra RSS rings get their own. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page-buffer size 0 disables the page ring; re-enabled below
	 * when a page ring is configured. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* DMA address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA address of the first rx BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure and
	 * run with a partially filled ring. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the rx skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Fast-path mailbox addresses for this ring. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4799
4800 static void
4801 bnx2_init_all_rings(struct bnx2 *bp)
4802 {
4803         int i;
4804
4805         bnx2_clear_ring_states(bp);
4806
4807         REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4808         for (i = 0; i < bp->num_tx_rings; i++)
4809                 bnx2_init_tx_ring(bp, i);
4810
4811         if (bp->num_tx_rings > 1)
4812                 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4813                        (TX_TSS_CID << 7));
4814
4815         for (i = 0; i < bp->num_rx_rings; i++)
4816                 bnx2_init_rx_ring(bp, i);
4817 }
4818
4819 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4820 {
4821         u32 max, num_rings = 1;
4822
4823         while (ring_size > MAX_RX_DESC_CNT) {
4824                 ring_size -= MAX_RX_DESC_CNT;
4825                 num_rings++;
4826         }
4827         /* round to next power of 2 */
4828         max = max_size;
4829         while ((max & num_rings) == 0)
4830                 max >>= 1;
4831
4832         if (num_rings != max)
4833                 max <<= 1;
4834
4835         return max;
4836 }
4837
/* Derive all rx buffer and ring sizing from the requested ring size and
 * the current MTU.  When the rx buffer would not fit in a single page
 * (jumbo MTU) and the chip supports it, a separate page ring is sized
 * to hold the frame data beyond the header buffer.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment, padding and the
	 * trailing skb_shared_info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages per jumbo frame; the "- 40" presumably accounts
		 * for the portion kept in the header buffer -- confirm
		 * against the rx fast path. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In page mode the first buffer only needs to hold the
		 * copy-threshold bytes plus the rx offset. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4876
4877 static void
4878 bnx2_free_tx_skbs(struct bnx2 *bp)
4879 {
4880         int i;
4881
4882         for (i = 0; i < bp->num_tx_rings; i++) {
4883                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4884                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4885                 int j;
4886
4887                 if (txr->tx_buf_ring == NULL)
4888                         continue;
4889
4890                 for (j = 0; j < TX_DESC_CNT; ) {
4891                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4892                         struct sk_buff *skb = tx_buf->skb;
4893                         int k, last;
4894
4895                         if (skb == NULL) {
4896                                 j++;
4897                                 continue;
4898                         }
4899
4900                         pci_unmap_single(bp->pdev,
4901                                          pci_unmap_addr(tx_buf, mapping),
4902                         skb_headlen(skb), PCI_DMA_TODEVICE);
4903
4904                         tx_buf->skb = NULL;
4905
4906                         last = skb_shinfo(skb)->nr_frags;
4907                         for (k = 0; k < last; k++) {
4908                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4909                                 pci_unmap_page(bp->pdev,
4910                                         pci_unmap_addr(tx_buf, mapping),
4911                                         skb_shinfo(skb)->frags[j].size,
4912                                         PCI_DMA_TODEVICE);
4913                         }
4914                         dev_kfree_skb(skb);
4915                         j += k + 1;
4916                 }
4917         }
4918 }
4919
4920 static void
4921 bnx2_free_rx_skbs(struct bnx2 *bp)
4922 {
4923         int i;
4924
4925         for (i = 0; i < bp->num_rx_rings; i++) {
4926                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4927                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4928                 int j;
4929
4930                 if (rxr->rx_buf_ring == NULL)
4931                         return;
4932
4933                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4934                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4935                         struct sk_buff *skb = rx_buf->skb;
4936
4937                         if (skb == NULL)
4938                                 continue;
4939
4940                         pci_unmap_single(bp->pdev,
4941                                          pci_unmap_addr(rx_buf, mapping),
4942                                          bp->rx_buf_use_size,
4943                                          PCI_DMA_FROMDEVICE);
4944
4945                         rx_buf->skb = NULL;
4946
4947                         dev_kfree_skb(skb);
4948                 }
4949                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4950                         bnx2_free_rx_page(bp, rxr, j);
4951         }
4952 }
4953
/* Release all tx and rx buffers; called after a chip reset (see
 * bnx2_reset_nic()) before the rings are re-initialized.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4960
4961 static int
4962 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4963 {
4964         int rc;
4965
4966         rc = bnx2_reset_chip(bp, reset_code);
4967         bnx2_free_skbs(bp);
4968         if (rc)
4969                 return rc;
4970
4971         if ((rc = bnx2_init_chip(bp)) != 0)
4972                 return rc;
4973
4974         bnx2_init_all_rings(bp);
4975         return 0;
4976 }
4977
4978 static int
4979 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4980 {
4981         int rc;
4982
4983         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4984                 return rc;
4985
4986         spin_lock_bh(&bp->phy_lock);
4987         bnx2_init_phy(bp, reset_phy);
4988         bnx2_set_link(bp);
4989         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4990                 bnx2_remote_phy_event(bp);
4991         spin_unlock_bh(&bp->phy_lock);
4992         return 0;
4993 }
4994
/* Self-test: verify register read/write behavior.  For each table
 * entry, rw_mask is the set of bits expected to be writable and
 * ro_mask the set expected to be read-only.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Each register's
 * original value is restored after the probe.  Returns 0 on success
 * or -ENODEV on the first register that fails.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: writable bits must read back 0 and
		 * read-only bits must keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: writable bits must read back 1 and
		 * read-only bits must still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5165
5166 static int
5167 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5168 {
5169         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5170                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5171         int i;
5172
5173         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5174                 u32 offset;
5175
5176                 for (offset = 0; offset < size; offset += 4) {
5177
5178                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5179
5180                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5181                                 test_pattern[i]) {
5182                                 return -ENODEV;
5183                         }
5184                 }
5185         }
5186         return 0;
5187 }
5188
5189 static int
5190 bnx2_test_memory(struct bnx2 *bp)
5191 {
5192         int ret = 0;
5193         int i;
5194         static struct mem_entry {
5195                 u32   offset;
5196                 u32   len;
5197         } mem_tbl_5706[] = {
5198                 { 0x60000,  0x4000 },
5199                 { 0xa0000,  0x3000 },
5200                 { 0xe0000,  0x4000 },
5201                 { 0x120000, 0x4000 },
5202                 { 0x1a0000, 0x4000 },
5203                 { 0x160000, 0x4000 },
5204                 { 0xffffffff, 0    },
5205         },
5206         mem_tbl_5709[] = {
5207                 { 0x60000,  0x4000 },
5208                 { 0xa0000,  0x3000 },
5209                 { 0xe0000,  0x4000 },
5210                 { 0x120000, 0x4000 },
5211                 { 0x1a0000, 0x4000 },
5212                 { 0xffffffff, 0    },
5213         };
5214         struct mem_entry *mem_tbl;
5215
5216         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5217                 mem_tbl = mem_tbl_5709;
5218         else
5219                 mem_tbl = mem_tbl_5706;
5220
5221         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5222                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5223                         mem_tbl[i].len)) != 0) {
5224                         return ret;
5225                 }
5226         }
5227
5228         return ret;
5229 }
5230
5231 #define BNX2_MAC_LOOPBACK       0
5232 #define BNX2_PHY_LOOPBACK       1
5233
/* Send one self-addressed test frame through the requested loopback
 * path (MAC-internal or PHY-internal) and verify that it comes back
 * intact on rx ring 0.  Used by the ethtool self-test.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, and -ENODEV if the frame is not
 * received or comes back corrupted.  PHY loopback is skipped
 * (returns 0) when the PHY is remotely managed.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): these two assignments are redundant with the
	 * initializers above; kept as-is. */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, 8 bytes of
	 * zero padding, then a counting byte pattern that is verified
	 * byte-for-byte on receive. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce event (without an interrupt) so the status
	 * block is current before sampling the rx consumer index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx descriptor for the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell to start transmission. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second coalesce so the status block reflects the result. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been consumed by the tx engine ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as damaged. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Hardware length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5361
5362 #define BNX2_MAC_LOOPBACK_FAILED        1
5363 #define BNX2_PHY_LOOPBACK_FAILED        2
5364 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5365                                          BNX2_PHY_LOOPBACK_FAILED)
5366
5367 static int
5368 bnx2_test_loopback(struct bnx2 *bp)
5369 {
5370         int rc = 0;
5371
5372         if (!netif_running(bp->dev))
5373                 return BNX2_LOOPBACK_FAILED;
5374
5375         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5376         spin_lock_bh(&bp->phy_lock);
5377         bnx2_init_phy(bp, 1);
5378         spin_unlock_bh(&bp->phy_lock);
5379         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5380                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5381         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5382                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5383         return rc;
5384 }
5385
5386 #define NVRAM_SIZE 0x200
5387 #define CRC32_RESIDUAL 0xdebb20e3
5388
5389 static int
5390 bnx2_test_nvram(struct bnx2 *bp)
5391 {
5392         __be32 buf[NVRAM_SIZE / 4];
5393         u8 *data = (u8 *) buf;
5394         int rc = 0;
5395         u32 magic, csum;
5396
5397         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5398                 goto test_nvram_done;
5399
5400         magic = be32_to_cpu(buf[0]);
5401         if (magic != 0x669955aa) {
5402                 rc = -ENODEV;
5403                 goto test_nvram_done;
5404         }
5405
5406         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5407                 goto test_nvram_done;
5408
5409         csum = ether_crc_le(0x100, data);
5410         if (csum != CRC32_RESIDUAL) {
5411                 rc = -ENODEV;
5412                 goto test_nvram_done;
5413         }
5414
5415         csum = ether_crc_le(0x100, data + 0x100);
5416         if (csum != CRC32_RESIDUAL) {
5417                 rc = -ENODEV;
5418         }
5419
5420 test_nvram_done:
5421         return rc;
5422 }
5423
5424 static int
5425 bnx2_test_link(struct bnx2 *bp)
5426 {
5427         u32 bmsr;
5428
5429         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5430                 if (bp->link_up)
5431                         return 0;
5432                 return -ENODEV;
5433         }
5434         spin_lock_bh(&bp->phy_lock);
5435         bnx2_enable_bmsr1(bp);
5436         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5437         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5438         bnx2_disable_bmsr1(bp);
5439         spin_unlock_bh(&bp->phy_lock);
5440
5441         if (bmsr & BMSR_LSTATUS) {
5442                 return 0;
5443         }
5444         return -ENODEV;
5445 }
5446
5447 static int
5448 bnx2_test_intr(struct bnx2 *bp)
5449 {
5450         int i;
5451         u16 status_idx;
5452
5453         if (!netif_running(bp->dev))
5454                 return -ENODEV;
5455
5456         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5457
5458         /* This register is not touched during run-time. */
5459         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5460         REG_RD(bp, BNX2_HC_COMMAND);
5461
5462         for (i = 0; i < 10; i++) {
5463                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5464                         status_idx) {
5465
5466                         break;
5467                 }
5468
5469                 msleep_interruptible(10);
5470         }
5471         if (i < 10)
5472                 return 0;
5473
5474         return -ENODEV;
5475 }
5476
5477 /* Determining link for parallel detection. */
/* Decide whether a usable non-autonegotiating link partner is
 * present; used by bnx2_5706_serdes_timer() for parallel detection,
 * which holds bp->phy_lock around the call.  Returns 1 if the link
 * looks usable, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection may be disabled for this PHY. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected means no partner at all. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Check rx sync; read twice (NOTE(review): presumably the first
	 * read returns latched status -- confirm with the PHY docs). */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* A partner sending autoneg CONFIG words should be handled by
	 * autonegotiation, not by parallel detection. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5508
/* Timer-tick link maintenance for the 5706 SerDes PHY.  Implements
 * parallel detection (force 1Gb full duplex when a partner is seen
 * but does not autonegotiate) and the reverse transition back to
 * autoneg, and forces the link down/up when the PHY loses rx sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* A recent autoneg change is still settling; skip the
		 * link check this tick. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Link partner present but not autonegotiating:
			 * force 1Gb full duplex (parallel detection). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): 0x17/0x15 look like vendor expansion
		 * registers and bit 0x20 appears to indicate an
		 * autoneg-capable partner -- confirm against the PHY
		 * datasheet. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner can autonegotiate: leave the forced
			 * parallel-detect mode. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* Read AN_DBG twice; NOTE(review): presumably the first
		 * read returns latched status -- confirm. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Rx sync lost while nominally up: force the
			 * link down once, then re-evaluate. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5570
/* Timer-tick link maintenance for the 5708 SerDes PHY.  While the
 * link is down with autoneg requested, alternate between forced
 * 2.5Gb mode and autonegotiation so either kind of partner can link.
 * Skipped entirely for remotely managed or non-2.5G-capable PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* A recent mode change is still settling. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found nothing: try forced 2.5Gb. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode found nothing: return to autoneg
			 * and give it two timer ticks to settle. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5603
5604 static void
5605 bnx2_timer(unsigned long data)
5606 {
5607         struct bnx2 *bp = (struct bnx2 *) data;
5608
5609         if (!netif_running(bp->dev))
5610                 return;
5611
5612         if (atomic_read(&bp->intr_sem) != 0)
5613                 goto bnx2_restart_timer;
5614
5615         bnx2_send_heart_beat(bp);
5616
5617         bp->stats_blk->stat_FwRxDrop =
5618                 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5619
5620         /* workaround occasional corrupted counters */
5621         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5622                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5623                                             BNX2_HC_COMMAND_STATS_NOW);
5624
5625         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5626                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5627                         bnx2_5706_serdes_timer(bp);
5628                 else
5629                         bnx2_5708_serdes_timer(bp);
5630         }
5631
5632 bnx2_restart_timer:
5633         mod_timer(&bp->timer, jiffies + bp->current_interval);
5634 }
5635
5636 static int
5637 bnx2_request_irq(struct bnx2 *bp)
5638 {
5639         struct net_device *dev = bp->dev;
5640         unsigned long flags;
5641         struct bnx2_irq *irq;
5642         int rc = 0, i;
5643
5644         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5645                 flags = 0;
5646         else
5647                 flags = IRQF_SHARED;
5648
5649         for (i = 0; i < bp->irq_nvecs; i++) {
5650                 irq = &bp->irq_tbl[i];
5651                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5652                                  dev);
5653                 if (rc)
5654                         break;
5655                 irq->requested = 1;
5656         }
5657         return rc;
5658 }
5659
5660 static void
5661 bnx2_free_irq(struct bnx2 *bp)
5662 {
5663         struct net_device *dev = bp->dev;
5664         struct bnx2_irq *irq;
5665         int i;
5666
5667         for (i = 0; i < bp->irq_nvecs; i++) {
5668                 irq = &bp->irq_tbl[i];
5669                 if (irq->requested)
5670                         free_irq(irq->vector, dev);
5671                 irq->requested = 0;
5672         }
5673         if (bp->flags & BNX2_FLAG_USING_MSI)
5674                 pci_disable_msi(bp->pdev);
5675         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5676                 pci_disable_msix(bp->pdev);
5677
5678         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5679 }
5680
5681 static void
5682 bnx2_enable_msix(struct bnx2 *bp)
5683 {
5684         int i, rc;
5685         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5686
5687         bnx2_setup_msix_tbl(bp);
5688         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5689         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5690         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5691
5692         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5693                 msix_ent[i].entry = i;
5694                 msix_ent[i].vector = 0;
5695
5696                 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5697                 if (i == 0)
5698                         bp->irq_tbl[i].handler = bnx2_msi_1shot;
5699                 else
5700                         bp->irq_tbl[i].handler = bnx2_tx_msix;
5701         }
5702
5703         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5704         if (rc != 0)
5705                 return;
5706
5707         bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5708         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5709         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5710                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5711 }
5712
5713 static void
5714 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5715 {
5716         bp->irq_tbl[0].handler = bnx2_interrupt;
5717         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5718         bp->irq_nvecs = 1;
5719         bp->irq_tbl[0].vector = bp->pdev->irq;
5720
5721         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5722                 bnx2_enable_msix(bp);
5723
5724         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5725             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5726                 if (pci_enable_msi(bp->pdev) == 0) {
5727                         bp->flags |= BNX2_FLAG_USING_MSI;
5728                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5729                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5730                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5731                         } else
5732                                 bp->irq_tbl[0].handler = bnx2_msi;
5733
5734                         bp->irq_tbl[0].vector = bp->pdev->irq;
5735                 }
5736         }
5737         bp->num_tx_rings = 1;
5738         bp->num_rx_rings = 1;
5739 }
5740
5741 /* Called with rtnl_lock */
5742 static int
5743 bnx2_open(struct net_device *dev)
5744 {
5745         struct bnx2 *bp = netdev_priv(dev);
5746         int rc;
5747
5748         netif_carrier_off(dev);
5749
5750         bnx2_set_power_state(bp, PCI_D0);
5751         bnx2_disable_int(bp);
5752
5753         bnx2_setup_int_mode(bp, disable_msi);
5754         bnx2_napi_enable(bp);
5755         rc = bnx2_alloc_mem(bp);
5756         if (rc) {
5757                 bnx2_napi_disable(bp);
5758                 bnx2_free_mem(bp);
5759                 return rc;
5760         }
5761
5762         rc = bnx2_request_irq(bp);
5763
5764         if (rc) {
5765                 bnx2_napi_disable(bp);
5766                 bnx2_free_mem(bp);
5767                 return rc;
5768         }
5769
5770         rc = bnx2_init_nic(bp, 1);
5771
5772         if (rc) {
5773                 bnx2_napi_disable(bp);
5774                 bnx2_free_irq(bp);
5775                 bnx2_free_skbs(bp);
5776                 bnx2_free_mem(bp);
5777                 return rc;
5778         }
5779
5780         mod_timer(&bp->timer, jiffies + bp->current_interval);
5781
5782         atomic_set(&bp->intr_sem, 0);
5783
5784         bnx2_enable_int(bp);
5785
5786         if (bp->flags & BNX2_FLAG_USING_MSI) {
5787                 /* Test MSI to make sure it is working
5788                  * If MSI test fails, go back to INTx mode
5789                  */
5790                 if (bnx2_test_intr(bp) != 0) {
5791                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
5792                                " using MSI, switching to INTx mode. Please"
5793                                " report this failure to the PCI maintainer"
5794                                " and include system chipset information.\n",
5795                                bp->dev->name);
5796
5797                         bnx2_disable_int(bp);
5798                         bnx2_free_irq(bp);
5799
5800                         bnx2_setup_int_mode(bp, 1);
5801
5802                         rc = bnx2_init_nic(bp, 0);
5803
5804                         if (!rc)
5805                                 rc = bnx2_request_irq(bp);
5806
5807                         if (rc) {
5808                                 bnx2_napi_disable(bp);
5809                                 bnx2_free_skbs(bp);
5810                                 bnx2_free_mem(bp);
5811                                 del_timer_sync(&bp->timer);
5812                                 return rc;
5813                         }
5814                         bnx2_enable_int(bp);
5815                 }
5816         }
5817         if (bp->flags & BNX2_FLAG_USING_MSI)
5818                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5819         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5820                 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5821
5822         netif_start_queue(dev);
5823
5824         return 0;
5825 }
5826
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The interface may have been brought down after this work
	 * was scheduled; nothing to do in that case.
	 */
	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* NOTE(review): intr_sem appears to gate interrupt handling
	 * until bnx2_netif_start() re-enables it — confirm against
	 * the interrupt handler before relying on this.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5842
5843 static void
5844 bnx2_tx_timeout(struct net_device *dev)
5845 {
5846         struct bnx2 *bp = netdev_priv(dev);
5847
5848         /* This allows the netif to be shutdown gracefully before resetting */
5849         schedule_work(&bp->reset_task);
5850 }
5851
5852 #ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while the VLAN group pointer and the rx
	 * filtering mode are updated, then bring it back up.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5866 #endif
5867
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

	/* The queue should have been stopped before the ring filled
	 * up; running out of BDs here indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Pass the VLAN tag to the chip in the upper 16 BD flag bits. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: encode the TCP header offset
			 * (relative to the end of a standard IPv6 header,
			 * in 8-byte units) across several BD bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: the IP/TCP headers are rewritten
			 * below, so make sure we own a private copy.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the per-segment IP total length and the TCP
			 * pseudo-header checksum for the hardware.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Report IP and TCP option lengths (in 32-bit
			 * words) to the chip.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part of the skb and fill in the first BD. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain (txbd still points at it). */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Kick the chip: publish the new producer index and running
	 * byte count.
	 */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue if the ring can no longer hold a worst-case
	 * packet; re-wake if completions freed enough room meanwhile.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
6009
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Make sure a pending reset task cannot run concurrently with
	 * the teardown below.
	 */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the configured
	 * wake-on-LAN state.
	 */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6037
/* Combine a 64-bit hardware counter, split into _hi/_lo 32-bit words,
 * into an unsigned long.  On 32-bit platforms only the low word is
 * used since unsigned long cannot hold the full value.  The whole
 * expansion is parenthesized so the macros compose safely inside any
 * larger expression (e.g. multiplication or shifts by callers).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6050
/* Fold the chip's DMAed statistics block into the cached
 * net_device_stats structure and return it.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No statistics block allocated yet: return the cached
	 * (possibly stale or zeroed) numbers.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is derived from the individual error counters
	 * computed just above.
	 */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier sense errors are deliberately not
	 * reported on 5706 and 5708 A0 parts — presumably a hardware
	 * erratum; confirm against the chip errata documents.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Count both MAC buffer discards and firmware drops as missed. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6126
6127 /* All ethtool functions called with rtnl_lock */
6128
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY-capable device can drive either medium; fixed
	 * configurations advertise only their own port type.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link state fields sampled below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6187
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on copies; bp is only updated after all validation
	 * succeeds, so a rejected request leaves the state untouched.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable PHY and fibre media. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the
			 * selected medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex.  Fibre allows only 1G/2.5G full
		 * duplex; copper rejects forced gigabit speeds.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings and apply them. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6271
6272 static void
6273 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6274 {
6275         struct bnx2 *bp = netdev_priv(dev);
6276
6277         strcpy(info->driver, DRV_MODULE_NAME);
6278         strcpy(info->version, DRV_MODULE_VERSION);
6279         strcpy(info->bus_info, pci_name(bp->pdev));
6280         strcpy(info->fw_version, bp->fw_version);
6281 }
6282
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6290
/* Dump the chip's readable register ranges into the caller-supplied
 * BNX2_REGDUMP_LEN buffer; unreadable holes are left zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of byte offsets bounding the readable register ranges:
	 * { start0, end0, start1, end1, ... }.  The final 0x8000 entry
	 * terminates the walk (>= BNX2_REGDUMP_LEN).
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE(review): this adds u32 units, not bytes — correct only
	 * because reg_boundaries[0] is 0; subsequent repositioning goes
	 * through the byte-based orig_p arithmetic below.
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of a readable range: jump past the hole to the
		 * start of the next one.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6340
6341 static void
6342 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6343 {
6344         struct bnx2 *bp = netdev_priv(dev);
6345
6346         if (bp->flags & BNX2_FLAG_NO_WOL) {
6347                 wol->supported = 0;
6348                 wol->wolopts = 0;
6349         }
6350         else {
6351                 wol->supported = WAKE_MAGIC;
6352                 if (bp->wol)
6353                         wol->wolopts = WAKE_MAGIC;
6354                 else
6355                         wol->wolopts = 0;
6356         }
6357         memset(&wol->sopass, 0, sizeof(wol->sopass));
6358 }
6359
6360 static int
6361 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6362 {
6363         struct bnx2 *bp = netdev_priv(dev);
6364
6365         if (wol->wolopts & ~WAKE_MAGIC)
6366                 return -EINVAL;
6367
6368         if (wol->wolopts & WAKE_MAGIC) {
6369                 if (bp->flags & BNX2_FLAG_NO_WOL)
6370                         return -EINVAL;
6371
6372                 bp->wol = 1;
6373         }
6374         else {
6375                 bp->wol = 0;
6376         }
6377         return 0;
6378 }
6379
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* Restarting autonegotiation only makes sense when it is on. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With a remote PHY, hand link setup back to the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* phy_lock is dropped across the sleep; msleep() may
		 * not be called in atomic context.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handling in the timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear the loopback bit (possibly set above) and restart
	 * autonegotiation.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6422
6423 static int
6424 bnx2_get_eeprom_len(struct net_device *dev)
6425 {
6426         struct bnx2 *bp = netdev_priv(dev);
6427
6428         if (bp->flash_info == NULL)
6429                 return 0;
6430
6431         return (int) bp->flash_size;
6432 }
6433
6434 static int
6435 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6436                 u8 *eebuf)
6437 {
6438         struct bnx2 *bp = netdev_priv(dev);
6439         int rc;
6440
6441         /* parameters already validated in ethtool_get_eeprom */
6442
6443         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6444
6445         return rc;
6446 }
6447
6448 static int
6449 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6450                 u8 *eebuf)
6451 {
6452         struct bnx2 *bp = netdev_priv(dev);
6453         int rc;
6454
6455         /* parameters already validated in ethtool_set_eeprom */
6456
6457         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6458
6459         return rc;
6460 }
6461
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero the whole structure first; fields this driver does not
	 * implement are reported as 0.
	 */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6483
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Silently clamp each parameter to the maximum the driver
	 * programs into the host coalescing block: 0x3ff for tick
	 * values, 0xff for frame counts.
	 */
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* The 5708 only supports a stats tick of 0 (off) or 1 second;
	 * round any other request up to 1 second.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* A full NIC reinit is required for the new coalescing
	 * parameters to reach the hardware.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6532
6533 static void
6534 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6535 {
6536         struct bnx2 *bp = netdev_priv(dev);
6537
6538         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6539         ering->rx_mini_max_pending = 0;
6540         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6541
6542         ering->rx_pending = bp->rx_ring_size;
6543         ering->rx_mini_pending = 0;
6544         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6545
6546         ering->tx_max_pending = MAX_TX_DESC_CNT;
6547         ering->tx_pending = bp->tx_ring_size;
6548 }
6549
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	/* If the device is up it must be fully quiesced and its rings
	 * freed before the new sizes can take effect.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		/* NOTE(review): on allocation failure the interface is
		 * left stopped with no rings; the caller only gets an
		 * error code.  Confirm that callers handle this.
		 */
		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6574
6575 static int
6576 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6577 {
6578         struct bnx2 *bp = netdev_priv(dev);
6579         int rc;
6580
6581         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6582                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6583                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6584
6585                 return -EINVAL;
6586         }
6587         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6588         return rc;
6589 }
6590
6591 static void
6592 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6593 {
6594         struct bnx2 *bp = netdev_priv(dev);
6595
6596         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6597         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6598         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6599 }
6600
6601 static int
6602 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6603 {
6604         struct bnx2 *bp = netdev_priv(dev);
6605
6606         bp->req_flow_ctrl = 0;
6607         if (epause->rx_pause)
6608                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6609         if (epause->tx_pause)
6610                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6611
6612         if (epause->autoneg) {
6613                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6614         }
6615         else {
6616                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6617         }
6618
6619         spin_lock_bh(&bp->phy_lock);
6620
6621         bnx2_setup_phy(bp, bp->phy_port);
6622
6623         spin_unlock_bh(&bp->phy_lock);
6624
6625         return 0;
6626 }
6627
6628 static u32
6629 bnx2_get_rx_csum(struct net_device *dev)
6630 {
6631         struct bnx2 *bp = netdev_priv(dev);
6632
6633         return bp->rx_csum;
6634 }
6635
6636 static int
6637 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6638 {
6639         struct bnx2 *bp = netdev_priv(dev);
6640
6641         bp->rx_csum = data;
6642         return 0;
6643 }
6644
6645 static int
6646 bnx2_set_tso(struct net_device *dev, u32 data)
6647 {
6648         struct bnx2 *bp = netdev_priv(dev);
6649
6650         if (data) {
6651                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6652                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6653                         dev->features |= NETIF_F_TSO6;
6654         } else
6655                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6656                                    NETIF_F_TSO_ECN);
6657         return 0;
6658 }
6659
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The entries must stay in the same order
 * as the corresponding entries of bnx2_stats_offset_arr below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6712
/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter, indexed in the same order as
 * bnx2_stats_str_arr[].  For 64-bit counters the offset names the _hi
 * word; the _lo word immediately follows it in the hardware block.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6763
/* Byte width (0, 4 or 8) of each counter in bnx2_stats_str_arr[] order;
 * 0 means the counter is not reported for that chip.
 * On the 5706-class chips stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};

/* Same table for 5708 and later: only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6782
#define BNX2_NUM_TESTS 6

/* Names of the ethtool self-tests; order must match the buf[] slots
 * filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6795
6796 static int
6797 bnx2_get_sset_count(struct net_device *dev, int sset)
6798 {
6799         switch (sset) {
6800         case ETH_SS_TEST:
6801                 return BNX2_NUM_TESTS;
6802         case ETH_SS_STATS:
6803                 return BNX2_NUM_STATS;
6804         default:
6805                 return -EOPNOTSUPP;
6806         }
6807 }
6808
/* ethtool .self_test handler.  The offline tests (register, memory,
 * loopback) are destructive: they stop traffic, reset the chip into
 * diagnostic mode and free all buffers, so they run only when the user
 * requested ETH_TEST_FL_OFFLINE.  The online tests (NVRAM, interrupt,
 * link) always run.  buf[] slots follow bnx2_tests_str_arr[] order.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce the NIC before the destructive tests. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* Loopback test returns a bitmask of failed loopbacks. */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation, or leave the chip reset if
                 * the interface is administratively down.
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp);
                }

                /* wait for link up, up to ~7 seconds */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6864
6865 static void
6866 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6867 {
6868         switch (stringset) {
6869         case ETH_SS_STATS:
6870                 memcpy(buf, bnx2_stats_str_arr,
6871                         sizeof(bnx2_stats_str_arr));
6872                 break;
6873         case ETH_SS_TEST:
6874                 memcpy(buf, bnx2_tests_str_arr,
6875                         sizeof(bnx2_tests_str_arr));
6876                 break;
6877         }
6878 }
6879
6880 static void
6881 bnx2_get_ethtool_stats(struct net_device *dev,
6882                 struct ethtool_stats *stats, u64 *buf)
6883 {
6884         struct bnx2 *bp = netdev_priv(dev);
6885         int i;
6886         u32 *hw_stats = (u32 *) bp->stats_blk;
6887         u8 *stats_len_arr = NULL;
6888
6889         if (hw_stats == NULL) {
6890                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6891                 return;
6892         }
6893
6894         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6895             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6896             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6897             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6898                 stats_len_arr = bnx2_5706_stats_len_arr;
6899         else
6900                 stats_len_arr = bnx2_5708_stats_len_arr;
6901
6902         for (i = 0; i < BNX2_NUM_STATS; i++) {
6903                 if (stats_len_arr[i] == 0) {
6904                         /* skip this counter */
6905                         buf[i] = 0;
6906                         continue;
6907                 }
6908                 if (stats_len_arr[i] == 4) {
6909                         /* 4-byte counter */
6910                         buf[i] = (u64)
6911                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6912                         continue;
6913                 }
6914                 /* 8-byte counter */
6915                 buf[i] = (((u64) *(hw_stats +
6916                                         bnx2_stats_offset_arr[i])) << 32) +
6917                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6918         }
6919 }
6920
6921 static int
6922 bnx2_phys_id(struct net_device *dev, u32 data)
6923 {
6924         struct bnx2 *bp = netdev_priv(dev);
6925         int i;
6926         u32 save;
6927
6928         if (data == 0)
6929                 data = 2;
6930
6931         save = REG_RD(bp, BNX2_MISC_CFG);
6932         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6933
6934         for (i = 0; i < (data * 2); i++) {
6935                 if ((i % 2) == 0) {
6936                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6937                 }
6938                 else {
6939                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6940                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6941                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6942                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6943                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6944                                 BNX2_EMAC_LED_TRAFFIC);
6945                 }
6946                 msleep_interruptible(500);
6947                 if (signal_pending(current))
6948                         break;
6949         }
6950         REG_WR(bp, BNX2_EMAC_LED, 0);
6951         REG_WR(bp, BNX2_MISC_CFG, save);
6952         return 0;
6953 }
6954
6955 static int
6956 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6957 {
6958         struct bnx2 *bp = netdev_priv(dev);
6959
6960         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6961                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6962         else
6963                 return (ethtool_op_set_tx_csum(dev, data));
6964 }
6965
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
6996
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held.  MII access is refused when the PHY is
 * owned by remote (management) firmware, and requires the interface to
 * be up since MDIO needs the chip out of reset.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* phy_lock serializes MDIO access with the rest of
                 * the driver.
                 */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* Writing PHY registers is privileged. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
7050
7051 /* Called with rtnl_lock */
7052 static int
7053 bnx2_change_mac_addr(struct net_device *dev, void *p)
7054 {
7055         struct sockaddr *addr = p;
7056         struct bnx2 *bp = netdev_priv(dev);
7057
7058         if (!is_valid_ether_addr(addr->sa_data))
7059                 return -EINVAL;
7060
7061         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7062         if (netif_running(dev))
7063                 bnx2_set_mac_addr(bp);
7064
7065         return 0;
7066 }
7067
7068 /* Called with rtnl_lock */
7069 static int
7070 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7071 {
7072         struct bnx2 *bp = netdev_priv(dev);
7073
7074         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7075                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7076                 return -EINVAL;
7077
7078         dev->mtu = new_mtu;
7079         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7080 }
7081
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler by hand with the device IRQ
 * masked, so netconsole/netpoll can make progress without relying on
 * interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
}
#endif
7093
7094 static void __devinit
7095 bnx2_get_5709_media(struct bnx2 *bp)
7096 {
7097         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7098         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7099         u32 strap;
7100
7101         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7102                 return;
7103         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7104                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7105                 return;
7106         }
7107
7108         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7109                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7110         else
7111                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7112
7113         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7114                 switch (strap) {
7115                 case 0x4:
7116                 case 0x5:
7117                 case 0x6:
7118                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7119                         return;
7120                 }
7121         } else {
7122                 switch (strap) {
7123                 case 0x1:
7124                 case 0x2:
7125                 case 0x4:
7126                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7127                         return;
7128                 }
7129         }
7130 }
7131
/* Determine the host bus type and speed from the chip's PCICFG status
 * registers, recording the result in bp->flags (PCIX / 32-bit) and
 * bp->bus_speed_mhz.  Used for reporting and speed-dependent
 * workarounds.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                /* PCI-X: the detected bus clock is encoded in the
                 * clock control register.
                 */
                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: 33 vs 66 MHz via the M66EN pin. */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7183
7184 static int __devinit
7185 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7186 {
7187         struct bnx2 *bp;
7188         unsigned long mem_len;
7189         int rc, i, j;
7190         u32 reg;
7191         u64 dma_mask, persist_dma_mask;
7192
7193         SET_NETDEV_DEV(dev, &pdev->dev);
7194         bp = netdev_priv(dev);
7195
7196         bp->flags = 0;
7197         bp->phy_flags = 0;
7198
7199         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7200         rc = pci_enable_device(pdev);
7201         if (rc) {
7202                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7203                 goto err_out;
7204         }
7205
7206         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7207                 dev_err(&pdev->dev,
7208                         "Cannot find PCI device base address, aborting.\n");
7209                 rc = -ENODEV;
7210                 goto err_out_disable;
7211         }
7212
7213         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7214         if (rc) {
7215                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7216                 goto err_out_disable;
7217         }
7218
7219         pci_set_master(pdev);
7220         pci_save_state(pdev);
7221
7222         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7223         if (bp->pm_cap == 0) {
7224                 dev_err(&pdev->dev,
7225                         "Cannot find power management capability, aborting.\n");
7226                 rc = -EIO;
7227                 goto err_out_release;
7228         }
7229
7230         bp->dev = dev;
7231         bp->pdev = pdev;
7232
7233         spin_lock_init(&bp->phy_lock);
7234         spin_lock_init(&bp->indirect_lock);
7235         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7236
7237         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7238         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7239         dev->mem_end = dev->mem_start + mem_len;
7240         dev->irq = pdev->irq;
7241
7242         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7243
7244         if (!bp->regview) {
7245                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7246                 rc = -ENOMEM;
7247                 goto err_out_release;
7248         }
7249
7250         /* Configure byte swap and enable write to the reg_window registers.
7251          * Rely on CPU to do target byte swapping on big endian systems
7252          * The chip's target access swapping will not swap all accesses
7253          */
7254         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7255                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7256                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7257
7258         bnx2_set_power_state(bp, PCI_D0);
7259
7260         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7261
7262         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7263                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7264                         dev_err(&pdev->dev,
7265                                 "Cannot find PCIE capability, aborting.\n");
7266                         rc = -EIO;
7267                         goto err_out_unmap;
7268                 }
7269                 bp->flags |= BNX2_FLAG_PCIE;
7270                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7271                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7272         } else {
7273                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7274                 if (bp->pcix_cap == 0) {
7275                         dev_err(&pdev->dev,
7276                                 "Cannot find PCIX capability, aborting.\n");
7277                         rc = -EIO;
7278                         goto err_out_unmap;
7279                 }
7280         }
7281
7282         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7283                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7284                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7285         }
7286
7287         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7288                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7289                         bp->flags |= BNX2_FLAG_MSI_CAP;
7290         }
7291
7292         /* 5708 cannot support DMA addresses > 40-bit.  */
7293         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7294                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7295         else
7296                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7297
7298         /* Configure DMA attributes. */
7299         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7300                 dev->features |= NETIF_F_HIGHDMA;
7301                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7302                 if (rc) {
7303                         dev_err(&pdev->dev,
7304                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7305                         goto err_out_unmap;
7306                 }
7307         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7308                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7309                 goto err_out_unmap;
7310         }
7311
7312         if (!(bp->flags & BNX2_FLAG_PCIE))
7313                 bnx2_get_pci_speed(bp);
7314
7315         /* 5706A0 may falsely detect SERR and PERR. */
7316         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7317                 reg = REG_RD(bp, PCI_COMMAND);
7318                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7319                 REG_WR(bp, PCI_COMMAND, reg);
7320         }
7321         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7322                 !(bp->flags & BNX2_FLAG_PCIX)) {
7323
7324                 dev_err(&pdev->dev,
7325                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7326                 goto err_out_unmap;
7327         }
7328
7329         bnx2_init_nvram(bp);
7330
7331         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7332
7333         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7334             BNX2_SHM_HDR_SIGNATURE_SIG) {
7335                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7336
7337                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7338         } else
7339                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7340
7341         /* Get the permanent MAC address.  First we need to make sure the
7342          * firmware is actually running.
7343          */
7344         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7345
7346         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7347             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7348                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7349                 rc = -ENODEV;
7350                 goto err_out_unmap;
7351         }
7352
7353         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7354         for (i = 0, j = 0; i < 3; i++) {
7355                 u8 num, k, skip0;
7356
7357                 num = (u8) (reg >> (24 - (i * 8)));
7358                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7359                         if (num >= k || !skip0 || k == 1) {
7360                                 bp->fw_version[j++] = (num / k) + '0';
7361                                 skip0 = 0;
7362                         }
7363                 }
7364                 if (i != 2)
7365                         bp->fw_version[j++] = '.';
7366         }
7367         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7368         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7369                 bp->wol = 1;
7370
7371         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7372                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7373
7374                 for (i = 0; i < 30; i++) {
7375                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7376                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7377                                 break;
7378                         msleep(10);
7379                 }
7380         }
7381         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7382         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7383         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7384             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7385                 int i;
7386                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7387
7388                 bp->fw_version[j++] = ' ';
7389                 for (i = 0; i < 3; i++) {
7390                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7391                         reg = swab32(reg);
7392                         memcpy(&bp->fw_version[j], &reg, 4);
7393                         j += 4;
7394                 }
7395         }
7396
7397         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7398         bp->mac_addr[0] = (u8) (reg >> 8);
7399         bp->mac_addr[1] = (u8) reg;
7400
7401         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7402         bp->mac_addr[2] = (u8) (reg >> 24);
7403         bp->mac_addr[3] = (u8) (reg >> 16);
7404         bp->mac_addr[4] = (u8) (reg >> 8);
7405         bp->mac_addr[5] = (u8) reg;
7406
7407         bp->tx_ring_size = MAX_TX_DESC_CNT;
7408         bnx2_set_rx_ring_size(bp, 255);
7409
7410         bp->rx_csum = 1;
7411
7412         bp->tx_quick_cons_trip_int = 20;
7413         bp->tx_quick_cons_trip = 20;
7414         bp->tx_ticks_int = 80;
7415         bp->tx_ticks = 80;
7416
7417         bp->rx_quick_cons_trip_int = 6;
7418         bp->rx_quick_cons_trip = 6;
7419         bp->rx_ticks_int = 18;
7420         bp->rx_ticks = 18;
7421
7422         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7423
7424         bp->timer_interval =  HZ;
7425         bp->current_interval =  HZ;
7426
7427         bp->phy_addr = 1;
7428
7429         /* Disable WOL support if we are running on a SERDES chip. */
7430         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7431                 bnx2_get_5709_media(bp);
7432         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7433                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7434
7435         bp->phy_port = PORT_TP;
7436         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7437                 bp->phy_port = PORT_FIBRE;
7438                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7439                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7440                         bp->flags |= BNX2_FLAG_NO_WOL;
7441                         bp->wol = 0;
7442                 }
7443                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7444                         /* Don't do parallel detect on this board because of
7445                          * some board problems.  The link will not go down
7446                          * if we do parallel detect.
7447                          */
7448                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7449                             pdev->subsystem_device == 0x310c)
7450                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7451                 } else {
7452                         bp->phy_addr = 2;
7453                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7454                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7455                 }
7456                 bnx2_init_remote_phy(bp);
7457
7458         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7459                    CHIP_NUM(bp) == CHIP_NUM_5708)
7460                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7461         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7462                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7463                   CHIP_REV(bp) == CHIP_REV_Bx))
7464                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7465
7466         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7467             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7468             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7469                 bp->flags |= BNX2_FLAG_NO_WOL;
7470                 bp->wol = 0;
7471         }
7472
7473         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7474                 bp->tx_quick_cons_trip_int =
7475                         bp->tx_quick_cons_trip;
7476                 bp->tx_ticks_int = bp->tx_ticks;
7477                 bp->rx_quick_cons_trip_int =
7478                         bp->rx_quick_cons_trip;
7479                 bp->rx_ticks_int = bp->rx_ticks;
7480                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7481                 bp->com_ticks_int = bp->com_ticks;
7482                 bp->cmd_ticks_int = bp->cmd_ticks;
7483         }
7484
7485         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7486          *
7487          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7488          * with byte enables disabled on the unused 32-bit word.  This is legal
7489          * but causes problems on the AMD 8132 which will eventually stop
7490          * responding after a while.
7491          *
7492          * AMD believes this incompatibility is unique to the 5706, and
7493          * prefers to locally disable MSI rather than globally disabling it.
7494          */
7495         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7496                 struct pci_dev *amd_8132 = NULL;
7497
7498                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7499                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7500                                                   amd_8132))) {
7501
7502                         if (amd_8132->revision >= 0x10 &&
7503                             amd_8132->revision <= 0x13) {
7504                                 disable_msi = 1;
7505                                 pci_dev_put(amd_8132);
7506                                 break;
7507                         }
7508                 }
7509         }
7510
7511         bnx2_set_default_link(bp);
7512         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7513
7514         init_timer(&bp->timer);
7515         bp->timer.expires = RUN_AT(bp->timer_interval);
7516         bp->timer.data = (unsigned long) bp;
7517         bp->timer.function = bnx2_timer;
7518
7519         return 0;
7520
7521 err_out_unmap:
7522         if (bp->regview) {
7523                 iounmap(bp->regview);
7524                 bp->regview = NULL;
7525         }
7526
7527 err_out_release:
7528         pci_release_regions(pdev);
7529
7530 err_out_disable:
7531         pci_disable_device(pdev);
7532         pci_set_drvdata(pdev, NULL);
7533
7534 err_out:
7535         return rc;
7536 }
7537
7538 static char * __devinit
7539 bnx2_bus_string(struct bnx2 *bp, char *str)
7540 {
7541         char *s = str;
7542
7543         if (bp->flags & BNX2_FLAG_PCIE) {
7544                 s += sprintf(s, "PCI Express");
7545         } else {
7546                 s += sprintf(s, "PCI");
7547                 if (bp->flags & BNX2_FLAG_PCIX)
7548                         s += sprintf(s, "-X");
7549                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7550                         s += sprintf(s, " 32-bit");
7551                 else
7552                         s += sprintf(s, " 64-bit");
7553                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7554         }
7555         return str;
7556 }
7557
7558 static void __devinit
7559 bnx2_init_napi(struct bnx2 *bp)
7560 {
7561         int i;
7562
7563         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7564                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7565                 int (*poll)(struct napi_struct *, int);
7566
7567                 if (i == 0)
7568                         poll = bnx2_poll;
7569                 else
7570                         poll = bnx2_tx_poll;
7571
7572                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7573                 bnapi->bp = bp;
7574         }
7575 }
7576
/* PCI probe entry point.  Allocates the net_device, initializes the
 * board (register mapping, NVRAM, PHY detection), wires up the netdev
 * operations and feature flags, and registers the interface.
 * Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* bnx2_init_board() cleans up its own partial state on failure;
	 * we only need to free the netdev here.
	 */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from NVRAM by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum/TSO offload capabilities; the 5709 additionally
	 * supports IPv6 offloads.
	 */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	/* Register last; on failure, unwind everything done by
	 * bnx2_init_board() above.
	 */
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7664
/* PCI remove entry point.  Tears down in the reverse order of probe:
 * cancel deferred work, unregister the netdev, then release MMIO and
 * PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure no queued reset task runs after the device is gone. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7683
/* Power-management suspend hook.  Quiesces the NIC, tells the firmware
 * whether to keep the link up for wake-on-LAN, and drops the device
 * into the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware unload message based on WOL capability
	 * and configuration.
	 */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7714
/* Power-management resume hook.  Restores PCI config state and, if the
 * interface was running at suspend time, re-initializes the NIC and
 * restarts traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init (reset_phy = 1) since the chip lost state. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7731
7732 /**
7733  * bnx2_io_error_detected - called when PCI error is detected
7734  * @pdev: Pointer to PCI device
7735  * @state: The current pci connection state
7736  *
7737  * This function is called after a PCI bus error affecting
7738  * this device has been detected.
7739  */
7740 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7741                                                pci_channel_state_t state)
7742 {
7743         struct net_device *dev = pci_get_drvdata(pdev);
7744         struct bnx2 *bp = netdev_priv(dev);
7745
7746         rtnl_lock();
7747         netif_device_detach(dev);
7748
7749         if (netif_running(dev)) {
7750                 bnx2_netif_stop(bp);
7751                 del_timer_sync(&bp->timer);
7752                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7753         }
7754
7755         pci_disable_device(pdev);
7756         rtnl_unlock();
7757
7758         /* Request a slot slot reset. */
7759         return PCI_ERS_RESULT_NEED_RESET;
7760 }
7761
7762 /**
7763  * bnx2_io_slot_reset - called after the pci bus has been reset.
7764  * @pdev: Pointer to PCI device
7765  *
7766  * Restart the card from scratch, as if from a cold-boot.
7767  */
7768 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7769 {
7770         struct net_device *dev = pci_get_drvdata(pdev);
7771         struct bnx2 *bp = netdev_priv(dev);
7772
7773         rtnl_lock();
7774         if (pci_enable_device(pdev)) {
7775                 dev_err(&pdev->dev,
7776                         "Cannot re-enable PCI device after reset.\n");
7777                 rtnl_unlock();
7778                 return PCI_ERS_RESULT_DISCONNECT;
7779         }
7780         pci_set_master(pdev);
7781         pci_restore_state(pdev);
7782
7783         if (netif_running(dev)) {
7784                 bnx2_set_power_state(bp, PCI_D0);
7785                 bnx2_init_nic(bp, 1);
7786         }
7787
7788         rtnl_unlock();
7789         return PCI_ERS_RESULT_RECOVERED;
7790 }
7791
7792 /**
7793  * bnx2_io_resume - called when traffic can start flowing again.
7794  * @pdev: Pointer to PCI device
7795  *
7796  * This callback is called when the error recovery driver tells us that
7797  * its OK to resume normal operation.
7798  */
7799 static void bnx2_io_resume(struct pci_dev *pdev)
7800 {
7801         struct net_device *dev = pci_get_drvdata(pdev);
7802         struct bnx2 *bp = netdev_priv(dev);
7803
7804         rtnl_lock();
7805         if (netif_running(dev))
7806                 bnx2_netif_start(bp);
7807
7808         netif_device_attach(dev);
7809         rtnl_unlock();
7810 }
7811
/* PCI Advanced Error Reporting (AER) recovery callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7817
/* PCI driver glue: probe/remove, power management, and error recovery. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7827
/* Module init: register the driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7832
/* Module exit: unregister the driver (triggers remove for all devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7837
/* Hook the init/exit routines into module load and unload. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7840
7841
7842