bnx2: Add ack parameter to bnx2_fw_sync().
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.7"
60 #define DRV_MODULE_RELDATE      "June 17, 2008"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81         BCM5706 = 0,
82         NC370T,
83         NC370I,
84         BCM5706S,
85         NC370F,
86         BCM5708,
87         BCM5708S,
88         BCM5709,
89         BCM5709S,
90         BCM5716,
91 } board_t;
92
93 /* indexed by board_t, above */
94 static struct {
95         char *name;
96 } board_info[] __devinitdata = {
97         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
98         { "HP NC370T Multifunction Gigabit Server Adapter" },
99         { "HP NC370i Multifunction Gigabit Server Adapter" },
100         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
101         { "HP NC370F Multifunction Gigabit Server Adapter" },
102         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
103         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
104         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
105         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
106         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
107         };
108
109 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
115           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
117           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
125           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
127           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
128         { PCI_VENDOR_ID_BROADCOM, 0x163b,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
130         { 0, }
131 };
132
133 static struct flash_spec flash_table[] =
134 {
135 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
136 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
137         /* Slow EEPROM */
138         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
139          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
140          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
141          "EEPROM - slow"},
142         /* Expansion entry 0001 */
143         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
144          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
146          "Entry 0001"},
147         /* Saifun SA25F010 (non-buffered flash) */
148         /* strap, cfg1, & write1 need updates */
149         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
150          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
151          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
152          "Non-buffered flash (128kB)"},
153         /* Saifun SA25F020 (non-buffered flash) */
154         /* strap, cfg1, & write1 need updates */
155         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
156          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
157          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
158          "Non-buffered flash (256kB)"},
159         /* Expansion entry 0100 */
160         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
161          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163          "Entry 0100"},
164         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
165         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
166          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
167          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
168          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
169         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
170         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
171          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
172          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
173          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
174         /* Saifun SA25F005 (non-buffered flash) */
175         /* strap, cfg1, & write1 need updates */
176         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
177          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
179          "Non-buffered flash (64kB)"},
180         /* Fast EEPROM */
181         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
182          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
183          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
184          "EEPROM - fast"},
185         /* Expansion entry 1001 */
186         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
187          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
189          "Entry 1001"},
190         /* Expansion entry 1010 */
191         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
192          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194          "Entry 1010"},
195         /* ATMEL AT45DB011B (buffered flash) */
196         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
197          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
198          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
199          "Buffered flash (128kB)"},
200         /* Expansion entry 1100 */
201         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
202          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204          "Entry 1100"},
205         /* Expansion entry 1101 */
206         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
207          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209          "Entry 1101"},
210         /* Ateml Expansion entry 1110 */
211         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
212          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
214          "Entry 1110 (Atmel)"},
215         /* ATMEL AT45DB021B (buffered flash) */
216         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
217          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
218          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
219          "Buffered flash (256kB)"},
220 };
221
/* NVRAM geometry for the BCM5709's buffered flash.  Unlike the
 * strap-keyed flash_table[] entries above, this spec carries no strap
 * match values; presumably it is selected directly for 5709-family
 * chips by the NVRAM init code — confirm against bnx2_init_nvram().
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
230
231 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
232
233 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
234 {
235         u32 diff;
236
237         smp_mb();
238
239         /* The ring uses 256 indices for 255 entries, one of them
240          * needs to be skipped.
241          */
242         diff = txr->tx_prod - txr->tx_cons;
243         if (unlikely(diff >= TX_DESC_CNT)) {
244                 diff &= 0xffff;
245                 if (diff == TX_DESC_CNT)
246                         diff = MAX_TX_DESC_CNT;
247         }
248         return (bp->tx_ring_size - diff);
249 }
250
/* Read a device register through the PCI config indirect window.
 *
 * The window address/data register pair is shared, so indirect_lock
 * serializes this against bnx2_reg_wr_ind() and bnx2_ctx_wr().
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
262
/* Write a device register through the PCI config indirect window.
 *
 * indirect_lock serializes the shared window address/data pair against
 * concurrent indirect reads/writes.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
271
/* Write a word at @offset within the firmware shared-memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
277
/* Read a word at @offset within the firmware shared-memory region. */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
283
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * On 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL pair
 * and we poll (up to 5 * 5us) for the hardware to clear WRITE_REQ;
 * older chips use the simpler CTX_DATA_ADR/CTX_DATA window.  The
 * shared address/data registers are protected by indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the chip to consume the write request;
		 * a timeout here is silently ignored.
		 */
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
308
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If the MDIO block is auto-polling, polling is temporarily disabled
 * (and restored on exit) since auto-poll and manual COMM transactions
 * share the MDIO bus.  Completion is polled for up to 50 * 10us.
 *
 * Returns 0 on success, or -EBUSY if the transaction never completed
 * (in which case *@val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the read transaction: PHY address, register, READ cmd. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to latch the returned data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
365
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * COMM transaction and restored afterwards; completion is polled for
 * up to 50 * 10us.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the write transaction: address, register, data, WRITE cmd. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
414
415 static void
416 bnx2_disable_int(struct bnx2 *bp)
417 {
418         int i;
419         struct bnx2_napi *bnapi;
420
421         for (i = 0; i < bp->irq_nvecs; i++) {
422                 bnapi = &bp->bnx2_napi[i];
423                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
424                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
425         }
426         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
427 }
428
/* Unmask interrupts on every configured vector.
 *
 * For each vector two writes are issued: the first acknowledges events
 * up to last_status_idx with the interrupt still masked, the second
 * repeats the ack with the mask bit cleared, re-enabling the vector.
 * The final COAL_NOW command kicks the host coalescing block so any
 * already-pending events generate an interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
449
/* Mask device interrupts and wait for in-flight handlers to complete.
 *
 * intr_sem is raised *before* masking so the re-enable path
 * (bnx2_netif_start's atomic_dec_and_test) only restores interrupts
 * once every outstanding disable has been balanced.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
460
461 static void
462 bnx2_napi_disable(struct bnx2 *bp)
463 {
464         int i;
465
466         for (i = 0; i < bp->irq_nvecs; i++)
467                 napi_disable(&bp->bnx2_napi[i].napi);
468 }
469
470 static void
471 bnx2_napi_enable(struct bnx2 *bp)
472 {
473         int i;
474
475         for (i = 0; i < bp->irq_nvecs; i++)
476                 napi_enable(&bp->bnx2_napi[i].napi);
477 }
478
/* Quiesce the interface: mask and synchronize interrupts, then stop
 * NAPI polling and the TX queue.  trans_start is refreshed so the
 * netdev watchdog does not fire a spurious TX timeout while the
 * queue is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
489
490 static void
491 bnx2_netif_start(struct bnx2 *bp)
492 {
493         if (atomic_dec_and_test(&bp->intr_sem)) {
494                 if (netif_running(bp->dev)) {
495                         netif_wake_queue(bp->dev);
496                         bnx2_napi_enable(bp);
497                         bnx2_enable_int(bp);
498                 }
499         }
500 }
501
502 static void
503 bnx2_free_tx_mem(struct bnx2 *bp)
504 {
505         int i;
506
507         for (i = 0; i < bp->num_tx_rings; i++) {
508                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
509                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
510
511                 if (txr->tx_desc_ring) {
512                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
513                                             txr->tx_desc_ring,
514                                             txr->tx_desc_mapping);
515                         txr->tx_desc_ring = NULL;
516                 }
517                 kfree(txr->tx_buf_ring);
518                 txr->tx_buf_ring = NULL;
519         }
520 }
521
522 static void
523 bnx2_free_rx_mem(struct bnx2 *bp)
524 {
525         int i;
526
527         for (i = 0; i < bp->num_rx_rings; i++) {
528                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
530                 int j;
531
532                 for (j = 0; j < bp->rx_max_ring; j++) {
533                         if (rxr->rx_desc_ring[j])
534                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535                                                     rxr->rx_desc_ring[j],
536                                                     rxr->rx_desc_mapping[j]);
537                         rxr->rx_desc_ring[j] = NULL;
538                 }
539                 if (rxr->rx_buf_ring)
540                         vfree(rxr->rx_buf_ring);
541                 rxr->rx_buf_ring = NULL;
542
543                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544                         if (rxr->rx_pg_desc_ring[j])
545                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546                                                     rxr->rx_pg_desc_ring[i],
547                                                     rxr->rx_pg_desc_mapping[i]);
548                         rxr->rx_pg_desc_ring[i] = NULL;
549                 }
550                 if (rxr->rx_pg_ring)
551                         vfree(rxr->rx_pg_ring);
552                 rxr->rx_pg_ring = NULL;
553         }
554 }
555
556 static int
557 bnx2_alloc_tx_mem(struct bnx2 *bp)
558 {
559         int i;
560
561         for (i = 0; i < bp->num_tx_rings; i++) {
562                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
563                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
564
565                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
566                 if (txr->tx_buf_ring == NULL)
567                         return -ENOMEM;
568
569                 txr->tx_desc_ring =
570                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
571                                              &txr->tx_desc_mapping);
572                 if (txr->tx_desc_ring == NULL)
573                         return -ENOMEM;
574         }
575         return 0;
576 }
577
578 static int
579 bnx2_alloc_rx_mem(struct bnx2 *bp)
580 {
581         int i;
582
583         for (i = 0; i < bp->num_rx_rings; i++) {
584                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
585                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
586                 int j;
587
588                 rxr->rx_buf_ring =
589                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
590                 if (rxr->rx_buf_ring == NULL)
591                         return -ENOMEM;
592
593                 memset(rxr->rx_buf_ring, 0,
594                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
595
596                 for (j = 0; j < bp->rx_max_ring; j++) {
597                         rxr->rx_desc_ring[j] =
598                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
599                                                      &rxr->rx_desc_mapping[j]);
600                         if (rxr->rx_desc_ring[j] == NULL)
601                                 return -ENOMEM;
602
603                 }
604
605                 if (bp->rx_pg_ring_size) {
606                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
607                                                   bp->rx_max_pg_ring);
608                         if (rxr->rx_pg_ring == NULL)
609                                 return -ENOMEM;
610
611                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
612                                bp->rx_max_pg_ring);
613                 }
614
615                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
616                         rxr->rx_pg_desc_ring[j] =
617                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
618                                                 &rxr->rx_pg_desc_mapping[j]);
619                         if (rxr->rx_pg_desc_ring[j] == NULL)
620                                 return -ENOMEM;
621
622                 }
623         }
624         return 0;
625 }
626
/* Free all device memory: TX/RX rings, 5709 context pages, and the
 * combined status+statistics block.
 *
 * The status block and stats block live in one DMA allocation (see
 * bnx2_alloc_mem), so freeing status_blk.msi releases both; stats_blk
 * is just cleared to avoid a dangling pointer.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
652
/* Allocate all device memory: the combined status+statistics DMA block,
 * the 5709 context pages, and the RX/TX rings.
 *
 * Returns 0 on success or -ENOMEM; any partial allocation is unwound
 * via bnx2_free_mem() before returning.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* With MSI-X, reserve one aligned sub-block per HW vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors each get their own aligned sub-block. */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host context memory, page by page. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
729
/* Report the current link state to the bootcode through the shared
 * memory BNX2_LINK_STATUS word (speed/duplex bits, link-up flag, and
 * autoneg progress).  Skipped when a remote PHY owns the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice for current state. */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
788
789 static char *
790 bnx2_xceiver_str(struct bnx2 *bp)
791 {
792         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
793                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
794                  "Copper"));
795 }
796
/* Log the current link state, update the netdev carrier flag, and
 * forward the state to the bootcode via bnx2_report_fw_link().
 *
 * The up-link message is assembled from several consecutive printk()
 * calls (speed, duplex, flow control), so the pieces must stay in this
 * exact order.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
833
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	/* Resolve the effective TX/RX pause configuration for the
	 * current link and store the result in bp->flow_ctrl.
	 */
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If either speed or flow control is not autonegotiated, apply
	 * the user-requested pause settings directly (full duplex only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * in its 1000X status register, so use that directly.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* For SerDes, translate the 1000BASE-X pause advertisement bits
	 * into the copper PAUSE_CAP/PAUSE_ASYM encoding so a single
	 * resolution path below handles both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
909
910 static int
911 bnx2_5709s_linkup(struct bnx2 *bp)
912 {
913         u32 val, speed;
914
915         bp->link_up = 1;
916
917         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
918         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
919         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
920
921         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
922                 bp->line_speed = bp->req_line_speed;
923                 bp->duplex = bp->req_duplex;
924                 return 0;
925         }
926         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
927         switch (speed) {
928                 case MII_BNX2_GP_TOP_AN_SPEED_10:
929                         bp->line_speed = SPEED_10;
930                         break;
931                 case MII_BNX2_GP_TOP_AN_SPEED_100:
932                         bp->line_speed = SPEED_100;
933                         break;
934                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
935                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
936                         bp->line_speed = SPEED_1000;
937                         break;
938                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
939                         bp->line_speed = SPEED_2500;
940                         break;
941         }
942         if (val & MII_BNX2_GP_TOP_AN_FD)
943                 bp->duplex = DUPLEX_FULL;
944         else
945                 bp->duplex = DUPLEX_HALF;
946         return 0;
947 }
948
949 static int
950 bnx2_5708s_linkup(struct bnx2 *bp)
951 {
952         u32 val;
953
954         bp->link_up = 1;
955         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
956         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
957                 case BCM5708S_1000X_STAT1_SPEED_10:
958                         bp->line_speed = SPEED_10;
959                         break;
960                 case BCM5708S_1000X_STAT1_SPEED_100:
961                         bp->line_speed = SPEED_100;
962                         break;
963                 case BCM5708S_1000X_STAT1_SPEED_1G:
964                         bp->line_speed = SPEED_1000;
965                         break;
966                 case BCM5708S_1000X_STAT1_SPEED_2G5:
967                         bp->line_speed = SPEED_2500;
968                         break;
969         }
970         if (val & BCM5708S_1000X_STAT1_FD)
971                 bp->duplex = DUPLEX_FULL;
972         else
973                 bp->duplex = DUPLEX_HALF;
974
975         return 0;
976 }
977
978 static int
979 bnx2_5706s_linkup(struct bnx2 *bp)
980 {
981         u32 bmcr, local_adv, remote_adv, common;
982
983         bp->link_up = 1;
984         bp->line_speed = SPEED_1000;
985
986         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
987         if (bmcr & BMCR_FULLDPLX) {
988                 bp->duplex = DUPLEX_FULL;
989         }
990         else {
991                 bp->duplex = DUPLEX_HALF;
992         }
993
994         if (!(bmcr & BMCR_ANENABLE)) {
995                 return 0;
996         }
997
998         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
999         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1000
1001         common = local_adv & remote_adv;
1002         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1003
1004                 if (common & ADVERTISE_1000XFULL) {
1005                         bp->duplex = DUPLEX_FULL;
1006                 }
1007                 else {
1008                         bp->duplex = DUPLEX_HALF;
1009                 }
1010         }
1011
1012         return 0;
1013 }
1014
1015 static int
1016 bnx2_copper_linkup(struct bnx2 *bp)
1017 {
1018         u32 bmcr;
1019
1020         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1021         if (bmcr & BMCR_ANENABLE) {
1022                 u32 local_adv, remote_adv, common;
1023
1024                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1025                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1026
1027                 common = local_adv & (remote_adv >> 2);
1028                 if (common & ADVERTISE_1000FULL) {
1029                         bp->line_speed = SPEED_1000;
1030                         bp->duplex = DUPLEX_FULL;
1031                 }
1032                 else if (common & ADVERTISE_1000HALF) {
1033                         bp->line_speed = SPEED_1000;
1034                         bp->duplex = DUPLEX_HALF;
1035                 }
1036                 else {
1037                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1038                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1039
1040                         common = local_adv & remote_adv;
1041                         if (common & ADVERTISE_100FULL) {
1042                                 bp->line_speed = SPEED_100;
1043                                 bp->duplex = DUPLEX_FULL;
1044                         }
1045                         else if (common & ADVERTISE_100HALF) {
1046                                 bp->line_speed = SPEED_100;
1047                                 bp->duplex = DUPLEX_HALF;
1048                         }
1049                         else if (common & ADVERTISE_10FULL) {
1050                                 bp->line_speed = SPEED_10;
1051                                 bp->duplex = DUPLEX_FULL;
1052                         }
1053                         else if (common & ADVERTISE_10HALF) {
1054                                 bp->line_speed = SPEED_10;
1055                                 bp->duplex = DUPLEX_HALF;
1056                         }
1057                         else {
1058                                 bp->line_speed = 0;
1059                                 bp->link_up = 0;
1060                         }
1061                 }
1062         }
1063         else {
1064                 if (bmcr & BMCR_SPEED100) {
1065                         bp->line_speed = SPEED_100;
1066                 }
1067                 else {
1068                         bp->line_speed = SPEED_10;
1069                 }
1070                 if (bmcr & BMCR_FULLDPLX) {
1071                         bp->duplex = DUPLEX_FULL;
1072                 }
1073                 else {
1074                         bp->duplex = DUPLEX_HALF;
1075                 }
1076         }
1077
1078         return 0;
1079 }
1080
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	/* Program the L2 context for one rx ring (selected by cid),
	 * including the 5709 pause-frame watermarks.
	 */
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* The low watermark is only armed when TX flow control
		 * is enabled; otherwise use the "disabled" value.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* A low mark at or above the ring size makes no sense. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale both marks down to the chip's units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to a 4-bit field; if it scaled to
		 * zero, disable the low mark as well.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1116
1117 static void
1118 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1119 {
1120         int i;
1121         u32 cid;
1122
1123         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1124                 if (i == 1)
1125                         cid = RX_RSS_CID;
1126                 bnx2_init_rx_context(bp, cid);
1127         }
1128 }
1129
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	/* Program the EMAC for the current link speed, duplex and
	 * flow-control settings, then acknowledge the link-change
	 * interrupt.  Always returns 0.
	 */
	u32 val;

	/* NOTE(review): 0x26ff vs 0x2620 selects different TX length
	 * parameters for 1G half duplex — exact field meaning per
	 * Broadcom chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode and
				 * falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx contexts hold flow-control watermarks that depend on
	 * bp->flow_ctrl, so reprogram them after a link change.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1199
1200 static void
1201 bnx2_enable_bmsr1(struct bnx2 *bp)
1202 {
1203         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1204             (CHIP_NUM(bp) == CHIP_NUM_5709))
1205                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1206                                MII_BNX2_BLK_ADDR_GP_STATUS);
1207 }
1208
1209 static void
1210 bnx2_disable_bmsr1(struct bnx2 *bp)
1211 {
1212         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1213             (CHIP_NUM(bp) == CHIP_NUM_5709))
1214                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1215                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1216 }
1217
1218 static int
1219 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1220 {
1221         u32 up1;
1222         int ret = 1;
1223
1224         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1225                 return 0;
1226
1227         if (bp->autoneg & AUTONEG_SPEED)
1228                 bp->advertising |= ADVERTISED_2500baseX_Full;
1229
1230         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1231                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1232
1233         bnx2_read_phy(bp, bp->mii_up1, &up1);
1234         if (!(up1 & BCM5708S_UP1_2G5)) {
1235                 up1 |= BCM5708S_UP1_2G5;
1236                 bnx2_write_phy(bp, bp->mii_up1, up1);
1237                 ret = 0;
1238         }
1239
1240         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1241                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1242                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1243
1244         return ret;
1245 }
1246
1247 static int
1248 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1249 {
1250         u32 up1;
1251         int ret = 0;
1252
1253         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1254                 return 0;
1255
1256         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1257                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1258
1259         bnx2_read_phy(bp, bp->mii_up1, &up1);
1260         if (up1 & BCM5708S_UP1_2G5) {
1261                 up1 &= ~BCM5708S_UP1_2G5;
1262                 bnx2_write_phy(bp, bp->mii_up1, up1);
1263                 ret = 1;
1264         }
1265
1266         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1267                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1268                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1269
1270         return ret;
1271 }
1272
1273 static void
1274 bnx2_enable_forced_2g5(struct bnx2 *bp)
1275 {
1276         u32 bmcr;
1277
1278         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1279                 return;
1280
1281         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1282                 u32 val;
1283
1284                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1285                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1286                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1287                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1288                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1289                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1290
1291                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1293                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1294
1295         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1296                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1298         }
1299
1300         if (bp->autoneg & AUTONEG_SPEED) {
1301                 bmcr &= ~BMCR_ANENABLE;
1302                 if (bp->req_duplex == DUPLEX_FULL)
1303                         bmcr |= BMCR_FULLDPLX;
1304         }
1305         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1306 }
1307
1308 static void
1309 bnx2_disable_forced_2g5(struct bnx2 *bp)
1310 {
1311         u32 bmcr;
1312
1313         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1314                 return;
1315
1316         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1317                 u32 val;
1318
1319                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1320                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1321                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1322                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1323                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1324
1325                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1327                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1328
1329         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1330                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1332         }
1333
1334         if (bp->autoneg & AUTONEG_SPEED)
1335                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1336         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1337 }
1338
1339 static void
1340 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1341 {
1342         u32 val;
1343
1344         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1345         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1346         if (start)
1347                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1348         else
1349                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1350 }
1351
static int
bnx2_set_link(struct bnx2 *bp)
{
	/* Re-evaluate the PHY link state, update bp->link_up and the
	 * speed/duplex/pause fields, log changes, and reprogram the
	 * MAC.  Always returns 0.
	 */
	u32 bmsr;
	u8 link_up;

	/* In MAC or PHY loopback the link is considered always up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY devices get link state from firmware events, not
	 * from direct PHY polling.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR link-down is latched (MII convention), so read twice:
	 * the first read clears the stale latch, the second reflects
	 * the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive the link bit from the EMAC
	 * status and the AN debug shadow register instead of trusting
	 * BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down link first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* an_dbg is also read twice to clear latched bits. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode and leave
		 * parallel-detect state so autoneg can restart cleanly.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1435
1436 static int
1437 bnx2_reset_phy(struct bnx2 *bp)
1438 {
1439         int i;
1440         u32 reg;
1441
1442         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1443
1444 #define PHY_RESET_MAX_WAIT 100
1445         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1446                 udelay(10);
1447
1448                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1449                 if (!(reg & BMCR_RESET)) {
1450                         udelay(20);
1451                         break;
1452                 }
1453         }
1454         if (i == PHY_RESET_MAX_WAIT) {
1455                 return -EBUSY;
1456         }
1457         return 0;
1458 }
1459
1460 static u32
1461 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1462 {
1463         u32 adv = 0;
1464
1465         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1466                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1467
1468                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1469                         adv = ADVERTISE_1000XPAUSE;
1470                 }
1471                 else {
1472                         adv = ADVERTISE_PAUSE_CAP;
1473                 }
1474         }
1475         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1476                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1477                         adv = ADVERTISE_1000XPSE_ASYM;
1478                 }
1479                 else {
1480                         adv = ADVERTISE_PAUSE_ASYM;
1481                 }
1482         }
1483         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1484                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1485                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1486                 }
1487                 else {
1488                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1489                 }
1490         }
1491         return adv;
1492 }
1493
1494 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1495
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	/* Build a speed/pause argument word from the current
	 * autoneg/forced settings and hand it to the firmware, which
	 * owns the remote PHY.  Assumes bp->phy_lock is held on entry;
	 * it is dropped and re-acquired around the firmware handshake.
	 * Always returns 0.
	 */
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	/* Pass the argument to the firmware via shared memory. */
	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop the PHY lock across the firmware SET_LINK handshake. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1552
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	/* Configure the SerDes PHY for the requested forced or
	 * autonegotiated settings.  Assumes bp->phy_lock is held; it is
	 * briefly dropped while waiting for a forced link-down to be
	 * seen by the partner.  Always returns 0.
	 */
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHYs are configured via the firmware path. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability requires a link bounce if it
		 * actually changed state.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): clears bit 13 of BMCR
				 * (BMCR_SPEED100 in standard MII) --
				 * chip-specific speed encoding.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just refresh MAC settings. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: advertise 2.5G (if capable), 1G full, and the
	 * requested pause configuration.
	 */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * was previously disabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the PHY lock while the forced-down link
			 * propagates to the partner.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1667
/* Everything we can advertise on a fibre (SerDes) link: 2.5G is included
 * only when the PHY reports 2.5G capability.  Expands in a context where
 * "bp" is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Everything we can advertise on a copper link (ethtool bit format). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* All 10/100 speeds in MII advertisement-register format. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* All gigabit speeds in MII 1000BASE-T control-register format. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1682
1683 static void
1684 bnx2_set_default_remote_link(struct bnx2 *bp)
1685 {
1686         u32 link;
1687
1688         if (bp->phy_port == PORT_TP)
1689                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1690         else
1691                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1692
1693         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1694                 bp->req_line_speed = 0;
1695                 bp->autoneg |= AUTONEG_SPEED;
1696                 bp->advertising = ADVERTISED_Autoneg;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698                         bp->advertising |= ADVERTISED_10baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1700                         bp->advertising |= ADVERTISED_10baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1702                         bp->advertising |= ADVERTISED_100baseT_Half;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1704                         bp->advertising |= ADVERTISED_100baseT_Full;
1705                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706                         bp->advertising |= ADVERTISED_1000baseT_Full;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708                         bp->advertising |= ADVERTISED_2500baseX_Full;
1709         } else {
1710                 bp->autoneg = 0;
1711                 bp->advertising = 0;
1712                 bp->req_duplex = DUPLEX_FULL;
1713                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1714                         bp->req_line_speed = SPEED_10;
1715                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1716                                 bp->req_duplex = DUPLEX_HALF;
1717                 }
1718                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1719                         bp->req_line_speed = SPEED_100;
1720                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1721                                 bp->req_duplex = DUPLEX_HALF;
1722                 }
1723                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1724                         bp->req_line_speed = SPEED_1000;
1725                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1726                         bp->req_line_speed = SPEED_2500;
1727         }
1728 }
1729
1730 static void
1731 bnx2_set_default_link(struct bnx2 *bp)
1732 {
1733         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1734                 bnx2_set_default_remote_link(bp);
1735                 return;
1736         }
1737
1738         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1739         bp->req_line_speed = 0;
1740         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1741                 u32 reg;
1742
1743                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1744
1745                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1746                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1747                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1748                         bp->autoneg = 0;
1749                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1750                         bp->req_duplex = DUPLEX_FULL;
1751                 }
1752         } else
1753                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1754 }
1755
/* Advance the driver pulse sequence number and write it to the
 * shared-memory pulse mailbox so the firmware knows the driver is alive.
 * Writes go through the PCICFG register window directly (rather than a
 * shmem helper) under indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Sequence number is truncated to the width the firmware expects. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1769
/* Handle a link-status event from a firmware-managed (remote) PHY.
 * Reads the BNX2_LINK_STATUS shared-memory word, updates the driver's
 * link state (up/down, speed, duplex, flow control, port type), then
 * reprograms the MAC and reports the link if its state changed.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember state to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it wants a fresh driver pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case deliberately falls through to the matching
		 * full-duplex case to pick up the common line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both speed
		 * and flow control are autonegotiated, in which case take
		 * the negotiated result reported by the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may have changed; re-derive the defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1846
1847 static int
1848 bnx2_set_remote_link(struct bnx2 *bp)
1849 {
1850         u32 evt_code;
1851
1852         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1853         switch (evt_code) {
1854                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1855                         bnx2_remote_phy_event(bp);
1856                         break;
1857                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1858                 default:
1859                         bnx2_send_heart_beat(bp);
1860                         break;
1861         }
1862         return 0;
1863 }
1864
/* Configure the copper PHY from bp->autoneg / bp->req_line_speed /
 * bp->req_duplex.  Called with phy_lock held; the lock is dropped around
 * the msleep() used to force the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 + pause advertisement. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from ethtool settings. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite advertisement and restart autoneg only if
		 * something actually changed (or autoneg was disabled).
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1961
1962 static int
1963 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1964 {
1965         if (bp->loopback == MAC_LOOPBACK)
1966                 return 0;
1967
1968         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1969                 return (bnx2_setup_serdes_phy(bp, port));
1970         }
1971         else {
1972                 return (bnx2_setup_copper_phy(bp));
1973         }
1974 }
1975
/* Initialize the 5709 SerDes PHY.  The 5709S PHY uses block-addressed
 * (paged) registers; the standard MII registers sit at offset 0x10 within
 * the combo IEEE block, so the mii_* shortcuts are remapped first.  The
 * register writes below are a fixed bring-up sequence — order matters.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Select fiber mode explicitly instead of auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G in the over-1G block only if the PHY supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Clause-73 BAM configuration. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2025
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect, PLL early
 * link detect, optional 2.5G advertisement, plus two hardware workarounds
 * (TX amplitude on early steppings, NVRAM-specified TX control for
 * backplane designs).  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE-compliant signaling (DIG3 block), then back to DIG. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL early) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may carry a board-specific TX control value; apply it only
	 * on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2083
/* Initialize the 5706 SerDes PHY.  The 0x18/0x1c writes are shadow-
 * register accesses that enable (or disable) extended packet length
 * depending on whether a jumbo MTU is configured.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Start from a clean parallel-detect state. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2121
/* Initialize the copper PHY: apply the CRC and early-DAC workarounds when
 * the corresponding phy_flags are set, size the packet length for the
 * current MTU, and enable ethernet@wirespeed.  The raw 0x15/0x17/0x18
 * register writes are vendor-specified shadow/DSP access sequences.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround: fixed DSP write sequence via regs 0x17/0x15. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of DSP expansion register 8). */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2173
2174
/* Top-level PHY initialization: set default MII register offsets, enable
 * link attentions, identify the PHY, dispatch to the chip-specific init
 * routine (skipped entirely for firmware-managed remote PHYs), then run
 * bnx2_setup_phy().  Returns 0 or the first error encountered.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Standard MII offsets; chip-specific init may remap these
	 * (e.g. the 5709S shifts them by 0x10).
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote PHYs are owned by firmware; no local PHY access. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2218
2219 static int
2220 bnx2_set_mac_loopback(struct bnx2 *bp)
2221 {
2222         u32 mac_mode;
2223
2224         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2225         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2226         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2227         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2228         bp->link_up = 1;
2229         return 0;
2230 }
2231
2232 static int bnx2_test_link(struct bnx2 *);
2233
/* Put the PHY into loopback at 1000 Mb/s full duplex and configure the
 * MAC to match (GMII port mode, loopback/force bits cleared).  Waits up
 * to ~1 s for the PHY link to settle.  Returns 0 on success or the PHY
 * write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give up silently after 10 x 100 ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC-level loopback/force bits; the loopback is in the PHY. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2263
/* Send a message to the bootcode through the BNX2_DRV_MB mailbox.
 *
 * @msg_data: message code/data; the driver sequence number is OR'ed in.
 * @ack:      if zero, fire-and-forget (returns 0 immediately after the
 *            write); if non-zero, poll BNX2_FW_MB for the firmware's
 *            acknowledgement of this sequence number.
 * @silent:   suppress the timeout printk.
 *
 * Returns 0 on success (or when no ack was requested, or for WAIT0
 * messages), -EBUSY on ack timeout, -EIO if the firmware acked with a
 * non-OK status.  Sleeps; must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not expected to be acked within the timeout. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2309
/* Initialize the 5709 context memory: kick the hardware memory init,
 * then program each host context page into the on-chip page table,
 * polling for each write request to complete.  Returns 0 on success,
 * -EBUSY on a hardware timeout, -ENOMEM if a context page is missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to finish context memory init. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages were allocated earlier; a missing one is fatal. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the table. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware consumes the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2357
/* Zero-initialize all 96 connection contexts (pre-5709 chips).  On
 * 5706 A0 some context IDs map to remapped physical context addresses,
 * so the physical CID is recomputed per the workaround below.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 workaround: CIDs with bit 3 set are relocated
			 * into the 0x60-based region.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context may span multiple physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2400
/* Work around bad RX buffer memory blocks: drain the chip's mbuf
 * allocator, remember the good buffers (bit 9 clear), and free only
 * those back — permanently removing the bad blocks from circulation.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough for every mbuf the pool can hand out. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command for this mbuf address. */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2452
2453 static void
2454 bnx2_set_mac_addr(struct bnx2 *bp)
2455 {
2456         u32 val;
2457         u8 *mac_addr = bp->dev->dev_addr;
2458
2459         val = (mac_addr[0] << 8) | mac_addr[1];
2460
2461         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2462
2463         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2464                 (mac_addr[4] << 8) | mac_addr[5];
2465
2466         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2467 }
2468
/* Allocate one page for the RX page ring at @index, DMA-map it, and fill
 * in the corresponding buffer descriptor.  Returns 0 or -ENOMEM.
 * NOTE(review): the pci_map_page() result is not checked with
 * pci_dma_mapping_error() — on platforms where mapping can fail, a bad
 * address would be handed to the hardware; confirm against later
 * upstream fixes.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* BD carries the 64-bit DMA address split into hi/lo words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2488
2489 static void
2490 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2491 {
2492         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2493         struct page *page = rx_pg->page;
2494
2495         if (!page)
2496                 return;
2497
2498         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2499                        PCI_DMA_FROMDEVICE);
2500
2501         __free_page(page);
2502         rx_pg->page = NULL;
2503 }
2504
2505 static inline int
2506 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2507 {
2508         struct sk_buff *skb;
2509         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2510         dma_addr_t mapping;
2511         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2512         unsigned long align;
2513
2514         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2515         if (skb == NULL) {
2516                 return -ENOMEM;
2517         }
2518
2519         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2520                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2521
2522         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2523                 PCI_DMA_FROMDEVICE);
2524
2525         rx_buf->skb = skb;
2526         pci_unmap_addr_set(rx_buf, mapping, mapping);
2527
2528         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2529         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2530
2531         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2532
2533         return 0;
2534 }
2535
2536 static int
2537 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2538 {
2539         struct status_block *sblk = bnapi->status_blk.msi;
2540         u32 new_link_state, old_link_state;
2541         int is_set = 1;
2542
2543         new_link_state = sblk->status_attn_bits & event;
2544         old_link_state = sblk->status_attn_bits_ack & event;
2545         if (new_link_state != old_link_state) {
2546                 if (new_link_state)
2547                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2548                 else
2549                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2550         } else
2551                 is_set = 0;
2552
2553         return is_set;
2554 }
2555
2556 static void
2557 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2558 {
2559         spin_lock(&bp->phy_lock);
2560
2561         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2562                 bnx2_set_link(bp);
2563         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2564                 bnx2_set_remote_link(bp);
2565
2566         spin_unlock(&bp->phy_lock);
2567
2568 }
2569
/* Read the hardware tx consumer index from the status block.  When
 * the index lands on the last slot of a ring page (all page-offset
 * bits set), it is advanced past it — that slot is never used as a
 * packet index by this driver.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2582
/* Reclaim tx buffers whose BDs the hardware has completed, up to
 * @budget packets (a budget of 0 is effectively unlimited since
 * tx_pkt is incremented before the comparison).  Called from NAPI
 * context.  Returns the number of packets freed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index just past this packet's BD chain
			 * (head BD + one BD per fragment).
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer reclaiming until the hardware has consumed
			 * the whole chain (s16 math handles index wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part first, then each fragment. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough BDs are free;
	 * re-check under netif_tx_lock to close the race with xmit.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2665
/* Recycle @count rx page-ring entries from the consumer side back to
 * the producer side without allocating new pages.  If @skb is
 * non-NULL, its last page fragment is detached, re-mapped for DMA and
 * placed into the first recycled consumer slot, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = rxr->rx_pg_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the skb's last fragment page and give it
			 * to the consumer slot, then free the skb.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page, unmap cookie and BD address from the
			 * consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2715
/* Recycle an rx buffer: re-post @skb at producer index @prod, reusing
 * the DMA mapping and BD address already attached to consumer index
 * @cons.  Used on error/copy-break paths instead of allocating.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header region back to the device; this matches the
	 * length synced to the CPU in bnx2_rx_int().
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2745
/* Finish receiving a packet into @skb.  @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16.
 * For split/jumbo packets (@hdr_len != 0) the remainder of the frame
 * is gathered from the page ring into skb fragments.  Returns 0 on
 * success or a negative errno; on failure all buffers are recycled.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the producer slot before consuming this buffer. */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the page-ring entries this packet
			 * would have consumed.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* The caller stripped 4 trailing bytes from @len; they
		 * still occupy the page ring, so re-add them when sizing
		 * the fragment walk and trim them again below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain: trim what was
				 * already added to the skb and recycle the
				 * unused page-ring entries.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Drop the 4 trailer bytes from the last page. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Refill the page-ring producer slot. */
			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Out of pages: give everything back,
				 * including fragments already attached
				 * to the skb.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2838
/* Read the hardware rx consumer index from the status block.  As with
 * the tx variant, an index whose page-offset bits are all ones is
 * advanced past — that slot is never used as a packet index here.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2851
/* Receive up to @budget packets from the rx ring.  Handles hardware
 * error frames, copy-break for small packets, split/jumbo packets via
 * the page ring, checksum offload and VLAN acceleration.  Returns the
 * number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync the l2_fhdr plus copy-break region to the CPU. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware writes an l2_fhdr status structure at the
		 * start of the buffer, ahead of the packet data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Recycle frames with hardware-detected receive errors. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split: bytes past hdr_len are in
			 * the page ring.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			/* Jumbo frame: the tail is in the page ring. */
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4 trailing bytes that follow the packet data. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Copy-break: duplicate the small packet into a
			 * fresh skb and keep the original buffer posted.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless VLAN tagged
		 * (0x8100 is the 802.1Q ethertype).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum unless it flagged
			 * a checksum error.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices and byte count. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2999
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask the interrupt; bnx2_poll() unmasks when done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3023
/* One-shot MSI ISR.  Unlike bnx2_msi(), no mask write is issued here.
 * NOTE(review): presumably the one-shot MSI mode disarms itself in
 * hardware — confirm against the chip documentation before changing.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3041
/* INTx ISR.  The line may be shared, so first determine whether this
 * device actually raised the interrupt before acking it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		/* Record the status index being serviced before
		 * scheduling the poll.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3081
3082 static inline int
3083 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3084 {
3085         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3086         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3087
3088         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3089             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3090                 return 1;
3091         return 0;
3092 }
3093
/* Attention bits serviced by the link/PHY handling path. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
3096
3097 static inline int
3098 bnx2_has_work(struct bnx2_napi *bnapi)
3099 {
3100         struct status_block *sblk = bnapi->status_blk.msi;
3101
3102         if (bnx2_has_fast_work(bnapi))
3103                 return 1;
3104
3105         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3106             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3107                 return 1;
3108
3109         return 0;
3110 }
3111
/* Check the status block for unacknowledged link attention events
 * and, if any are pending, run the PHY interrupt handling.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3131
/* Run one pass of tx reclaim and rx processing.  Tx reclaim is not
 * budgeted (bnx2_tx_int() is called with a budget of 0, which its
 * tx_pkt == budget check never matches); rx consumes the remaining
 * budget.  Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3146
/* NAPI poll handler for MSI-X vectors.  Loops until the budget is
 * exhausted or no fast-path work remains; unlike bnx2_poll(), no
 * link-event processing is done here.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: report progress to the hardware and
			 * re-enable this vector's interrupt.
			 */
			netif_rx_complete(bp->dev, napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3173
/* Main NAPI poll handler (INTx/MSI).  Services link events and
 * rx/tx work, then re-enables interrupts once no work remains.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: a single unmasked ack is enough. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: write once with the interrupt still masked,
			 * then again unmasked.  NOTE(review): the double
			 * write appears deliberate — confirm against chip
			 * errata before simplifying.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3217
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC rx mode and the RPM sort-user0 filter from the
 * netdev flags (promiscuous / all-multicast) and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; they are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and ASF
	 * is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash filter with ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Map each address, via CRC, to one bit of the
			 * multicast hash filter registers.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3292
3293 static void
3294 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3295         u32 rv2p_proc)
3296 {
3297         int i;
3298         u32 val;
3299
3300         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3301                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3302                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3303                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3304                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3305         }
3306
3307         for (i = 0; i < rv2p_code_len; i += 8) {
3308                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3309                 rv2p_code++;
3310                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3311                 rv2p_code++;
3312
3313                 if (rv2p_proc == RV2P_PROC1) {
3314                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3315                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3316                 }
3317                 else {
3318                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3319                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3320                 }
3321         }
3322
3323         /* Reset the processor, un-stall is done later. */
3324         if (rv2p_proc == RV2P_PROC1) {
3325                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3326         }
3327         else {
3328                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3329         }
3330 }
3331
3332 static int
3333 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3334 {
3335         u32 offset;
3336         u32 val;
3337         int rc;
3338
3339         /* Halt the CPU. */
3340         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3341         val |= cpu_reg->mode_value_halt;
3342         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3343         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3344
3345         /* Load the Text area. */
3346         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3347         if (fw->gz_text) {
3348                 int j;
3349
3350                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3351                                        fw->gz_text_len);
3352                 if (rc < 0)
3353                         return rc;
3354
3355                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3356                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3357                 }
3358         }
3359
3360         /* Load the Data area. */
3361         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3362         if (fw->data) {
3363                 int j;
3364
3365                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3366                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3367                 }
3368         }
3369
3370         /* Load the SBSS area. */
3371         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3372         if (fw->sbss_len) {
3373                 int j;
3374
3375                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3376                         bnx2_reg_wr_ind(bp, offset, 0);
3377                 }
3378         }
3379
3380         /* Load the BSS area. */
3381         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3382         if (fw->bss_len) {
3383                 int j;
3384
3385                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3386                         bnx2_reg_wr_ind(bp, offset, 0);
3387                 }
3388         }
3389
3390         /* Load the Read-Only area. */
3391         offset = cpu_reg->spad_base +
3392                 (fw->rodata_addr - cpu_reg->mips_view_base);
3393         if (fw->rodata) {
3394                 int j;
3395
3396                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3397                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3398                 }
3399         }
3400
3401         /* Clear the pre-fetch instruction. */
3402         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3403         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3404
3405         /* Start the CPU. */
3406         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3407         val &= ~cpu_reg->mode_value_halt;
3408         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3409         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3410
3411         return 0;
3412 }
3413
3414 static int
3415 bnx2_init_cpus(struct bnx2 *bp)
3416 {
3417         struct fw_info *fw;
3418         int rc, rv2p_len;
3419         void *text, *rv2p;
3420
3421         /* Initialize the RV2P processor. */
3422         text = vmalloc(FW_BUF_SIZE);
3423         if (!text)
3424                 return -ENOMEM;
3425         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3426                 rv2p = bnx2_xi_rv2p_proc1;
3427                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3428         } else {
3429                 rv2p = bnx2_rv2p_proc1;
3430                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3431         }
3432         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3433         if (rc < 0)
3434                 goto init_cpu_err;
3435
3436         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3437
3438         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3439                 rv2p = bnx2_xi_rv2p_proc2;
3440                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3441         } else {
3442                 rv2p = bnx2_rv2p_proc2;
3443                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3444         }
3445         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3446         if (rc < 0)
3447                 goto init_cpu_err;
3448
3449         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3450
3451         /* Initialize the RX Processor. */
3452         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3453                 fw = &bnx2_rxp_fw_09;
3454         else
3455                 fw = &bnx2_rxp_fw_06;
3456
3457         fw->text = text;
3458         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3459         if (rc)
3460                 goto init_cpu_err;
3461
3462         /* Initialize the TX Processor. */
3463         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3464                 fw = &bnx2_txp_fw_09;
3465         else
3466                 fw = &bnx2_txp_fw_06;
3467
3468         fw->text = text;
3469         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3470         if (rc)
3471                 goto init_cpu_err;
3472
3473         /* Initialize the TX Patch-up Processor. */
3474         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3475                 fw = &bnx2_tpat_fw_09;
3476         else
3477                 fw = &bnx2_tpat_fw_06;
3478
3479         fw->text = text;
3480         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3481         if (rc)
3482                 goto init_cpu_err;
3483
3484         /* Initialize the Completion Processor. */
3485         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3486                 fw = &bnx2_com_fw_09;
3487         else
3488                 fw = &bnx2_com_fw_06;
3489
3490         fw->text = text;
3491         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3492         if (rc)
3493                 goto init_cpu_err;
3494
3495         /* Initialize the Command Processor. */
3496         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3497                 fw = &bnx2_cp_fw_09;
3498         else
3499                 fw = &bnx2_cp_fw_06;
3500
3501         fw->text = text;
3502         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3503
3504 init_cpu_err:
3505         vfree(text);
3506         return rc;
3507 }
3508
/* Move the device between PCI power state D0 (fully on) and D3hot
 * (suspended, optionally armed for Wake-on-LAN).  Any other requested
 * state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	/* Read the current PM control/status word from config space. */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Select D0 and clear any latched PME status
		 * (PME_STATUS is write-1-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack pending magic/ACPI packet events and turn off
		 * magic-packet mode now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save the user's link settings; for copper we
			 * temporarily force 10/100 autoneg for the
			 * low-power link, then restore them below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			/* Reprogram the MAC address so the wake-up
			 * filters match it.
			 */
			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the boot-code firmware we are suspending (and
		 * whether WOL is armed) and wait for its ack.
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the D3hot encoding (3) is
		 * only programmed when WOL is armed — presumably a chip
		 * erratum; confirm against the errata sheet.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3646
3647 static int
3648 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3649 {
3650         u32 val;
3651         int j;
3652
3653         /* Request access to the flash interface. */
3654         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3655         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3656                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3657                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3658                         break;
3659
3660                 udelay(5);
3661         }
3662
3663         if (j >= NVRAM_TIMEOUT_COUNT)
3664                 return -EBUSY;
3665
3666         return 0;
3667 }
3668
3669 static int
3670 bnx2_release_nvram_lock(struct bnx2 *bp)
3671 {
3672         int j;
3673         u32 val;
3674
3675         /* Relinquish nvram interface. */
3676         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3677
3678         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3679                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3680                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3681                         break;
3682
3683                 udelay(5);
3684         }
3685
3686         if (j >= NVRAM_TIMEOUT_COUNT)
3687                 return -EBUSY;
3688
3689         return 0;
3690 }
3691
3692
3693 static int
3694 bnx2_enable_nvram_write(struct bnx2 *bp)
3695 {
3696         u32 val;
3697
3698         val = REG_RD(bp, BNX2_MISC_CFG);
3699         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3700
3701         if (bp->flash_info->flags & BNX2_NV_WREN) {
3702                 int j;
3703
3704                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3705                 REG_WR(bp, BNX2_NVM_COMMAND,
3706                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3707
3708                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3709                         udelay(5);
3710
3711                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3712                         if (val & BNX2_NVM_COMMAND_DONE)
3713                                 break;
3714                 }
3715
3716                 if (j >= NVRAM_TIMEOUT_COUNT)
3717                         return -EBUSY;
3718         }
3719         return 0;
3720 }
3721
3722 static void
3723 bnx2_disable_nvram_write(struct bnx2 *bp)
3724 {
3725         u32 val;
3726
3727         val = REG_RD(bp, BNX2_MISC_CFG);
3728         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3729 }
3730
3731
3732 static void
3733 bnx2_enable_nvram_access(struct bnx2 *bp)
3734 {
3735         u32 val;
3736
3737         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3738         /* Enable both bits, even on read. */
3739         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3740                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3741 }
3742
3743 static void
3744 bnx2_disable_nvram_access(struct bnx2 *bp)
3745 {
3746         u32 val;
3747
3748         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3749         /* Disable both bits, even after read. */
3750         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3751                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3752                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3753 }
3754
3755 static int
3756 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3757 {
3758         u32 cmd;
3759         int j;
3760
3761         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3762                 /* Buffered flash, no erase needed */
3763                 return 0;
3764
3765         /* Build an erase command */
3766         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3767               BNX2_NVM_COMMAND_DOIT;
3768
3769         /* Need to clear DONE bit separately. */
3770         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3771
3772         /* Address of the NVRAM to read from. */
3773         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3774
3775         /* Issue an erase command. */
3776         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3777
3778         /* Wait for completion. */
3779         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3780                 u32 val;
3781
3782                 udelay(5);
3783
3784                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3785                 if (val & BNX2_NVM_COMMAND_DONE)
3786                         break;
3787         }
3788
3789         if (j >= NVRAM_TIMEOUT_COUNT)
3790                 return -EBUSY;
3791
3792         return 0;
3793 }
3794
3795 static int
3796 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3797 {
3798         u32 cmd;
3799         int j;
3800
3801         /* Build the command word. */
3802         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3803
3804         /* Calculate an offset of a buffered flash, not needed for 5709. */
3805         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3806                 offset = ((offset / bp->flash_info->page_size) <<
3807                            bp->flash_info->page_bits) +
3808                           (offset % bp->flash_info->page_size);
3809         }
3810
3811         /* Need to clear DONE bit separately. */
3812         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3813
3814         /* Address of the NVRAM to read from. */
3815         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3816
3817         /* Issue a read command. */
3818         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3819
3820         /* Wait for completion. */
3821         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3822                 u32 val;
3823
3824                 udelay(5);
3825
3826                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3827                 if (val & BNX2_NVM_COMMAND_DONE) {
3828                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3829                         memcpy(ret_val, &v, 4);
3830                         break;
3831                 }
3832         }
3833         if (j >= NVRAM_TIMEOUT_COUNT)
3834                 return -EBUSY;
3835
3836         return 0;
3837 }
3838
3839
3840 static int
3841 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3842 {
3843         u32 cmd;
3844         __be32 val32;
3845         int j;
3846
3847         /* Build the command word. */
3848         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3849
3850         /* Calculate an offset of a buffered flash, not needed for 5709. */
3851         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3852                 offset = ((offset / bp->flash_info->page_size) <<
3853                           bp->flash_info->page_bits) +
3854                          (offset % bp->flash_info->page_size);
3855         }
3856
3857         /* Need to clear DONE bit separately. */
3858         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3859
3860         memcpy(&val32, val, 4);
3861
3862         /* Write the data. */
3863         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3864
3865         /* Address of the NVRAM to write to. */
3866         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3867
3868         /* Issue the write command. */
3869         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3870
3871         /* Wait for completion. */
3872         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3873                 udelay(5);
3874
3875                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3876                         break;
3877         }
3878         if (j >= NVRAM_TIMEOUT_COUNT)
3879                 return -EBUSY;
3880
3881         return 0;
3882 }
3883
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface for that part if
 * the boot code has not already done so.  Also determines the flash
 * size.  Returns 0 on success, -ENODEV for an unrecognized part, or a
 * negative errno if the NVRAM lock cannot be acquired.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single fixed flash configuration. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 indicates the boot code has already
	 * reconfigured the flash interface.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping pins. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* If either loop above ran to completion without a match,
	 * j == entry_count and no table entry fits this part.
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory by the boot code;
	 * fall back to the table entry's total size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3966
3967 static int
3968 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3969                 int buf_size)
3970 {
3971         int rc = 0;
3972         u32 cmd_flags, offset32, len32, extra;
3973
3974         if (buf_size == 0)
3975                 return 0;
3976
3977         /* Request access to the flash interface. */
3978         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3979                 return rc;
3980
3981         /* Enable access to flash interface */
3982         bnx2_enable_nvram_access(bp);
3983
3984         len32 = buf_size;
3985         offset32 = offset;
3986         extra = 0;
3987
3988         cmd_flags = 0;
3989
3990         if (offset32 & 3) {
3991                 u8 buf[4];
3992                 u32 pre_len;
3993
3994                 offset32 &= ~3;
3995                 pre_len = 4 - (offset & 3);
3996
3997                 if (pre_len >= len32) {
3998                         pre_len = len32;
3999                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4000                                     BNX2_NVM_COMMAND_LAST;
4001                 }
4002                 else {
4003                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4004                 }
4005
4006                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4007
4008                 if (rc)
4009                         return rc;
4010
4011                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4012
4013                 offset32 += 4;
4014                 ret_buf += pre_len;
4015                 len32 -= pre_len;
4016         }
4017         if (len32 & 3) {
4018                 extra = 4 - (len32 & 3);
4019                 len32 = (len32 + 4) & ~3;
4020         }
4021
4022         if (len32 == 4) {
4023                 u8 buf[4];
4024
4025                 if (cmd_flags)
4026                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4027                 else
4028                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4029                                     BNX2_NVM_COMMAND_LAST;
4030
4031                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4032
4033                 memcpy(ret_buf, buf, 4 - extra);
4034         }
4035         else if (len32 > 0) {
4036                 u8 buf[4];
4037
4038                 /* Read the first word. */
4039                 if (cmd_flags)
4040                         cmd_flags = 0;
4041                 else
4042                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4043
4044                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4045
4046                 /* Advance to the next dword. */
4047                 offset32 += 4;
4048                 ret_buf += 4;
4049                 len32 -= 4;
4050
4051                 while (len32 > 4 && rc == 0) {
4052                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4053
4054                         /* Advance to the next dword. */
4055                         offset32 += 4;
4056                         ret_buf += 4;
4057                         len32 -= 4;
4058                 }
4059
4060                 if (rc)
4061                         return rc;
4062
4063                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4064                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4065
4066                 memcpy(ret_buf, buf, 4 - extra);
4067         }
4068
4069         /* Disable access to flash interface */
4070         bnx2_disable_nvram_access(bp);
4071
4072         bnx2_release_nvram_lock(bp);
4073
4074         return rc;
4075 }
4076
4077 static int
4078 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4079                 int buf_size)
4080 {
4081         u32 written, offset32, len32;
4082         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4083         int rc = 0;
4084         int align_start, align_end;
4085
4086         buf = data_buf;
4087         offset32 = offset;
4088         len32 = buf_size;
4089         align_start = align_end = 0;
4090
4091         if ((align_start = (offset32 & 3))) {
4092                 offset32 &= ~3;
4093                 len32 += align_start;
4094                 if (len32 < 4)
4095                         len32 = 4;
4096                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4097                         return rc;
4098         }
4099
4100         if (len32 & 3) {
4101                 align_end = 4 - (len32 & 3);
4102                 len32 += align_end;
4103                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4104                         return rc;
4105         }
4106
4107         if (align_start || align_end) {
4108                 align_buf = kmalloc(len32, GFP_KERNEL);
4109                 if (align_buf == NULL)
4110                         return -ENOMEM;
4111                 if (align_start) {
4112                         memcpy(align_buf, start, 4);
4113                 }
4114                 if (align_end) {
4115                         memcpy(align_buf + len32 - 4, end, 4);
4116                 }
4117                 memcpy(align_buf + align_start, data_buf, buf_size);
4118                 buf = align_buf;
4119         }
4120
4121         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4122                 flash_buffer = kmalloc(264, GFP_KERNEL);
4123                 if (flash_buffer == NULL) {
4124                         rc = -ENOMEM;
4125                         goto nvram_write_end;
4126                 }
4127         }
4128
4129         written = 0;
4130         while ((written < len32) && (rc == 0)) {
4131                 u32 page_start, page_end, data_start, data_end;
4132                 u32 addr, cmd_flags;
4133                 int i;
4134
4135                 /* Find the page_start addr */
4136                 page_start = offset32 + written;
4137                 page_start -= (page_start % bp->flash_info->page_size);
4138                 /* Find the page_end addr */
4139                 page_end = page_start + bp->flash_info->page_size;
4140                 /* Find the data_start addr */
4141                 data_start = (written == 0) ? offset32 : page_start;
4142                 /* Find the data_end addr */
4143                 data_end = (page_end > offset32 + len32) ?
4144                         (offset32 + len32) : page_end;
4145
4146                 /* Request access to the flash interface. */
4147                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4148                         goto nvram_write_end;
4149
4150                 /* Enable access to flash interface */
4151                 bnx2_enable_nvram_access(bp);
4152
4153                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4154                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4155                         int j;
4156
4157                         /* Read the whole page into the buffer
4158                          * (non-buffer flash only) */
4159                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4160                                 if (j == (bp->flash_info->page_size - 4)) {
4161                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4162                                 }
4163                                 rc = bnx2_nvram_read_dword(bp,
4164                                         page_start + j,
4165                                         &flash_buffer[j],
4166                                         cmd_flags);
4167
4168                                 if (rc)
4169                                         goto nvram_write_end;
4170
4171                                 cmd_flags = 0;
4172                         }
4173                 }
4174
4175                 /* Enable writes to flash interface (unlock write-protect) */
4176                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4177                         goto nvram_write_end;
4178
4179                 /* Loop to write back the buffer data from page_start to
4180                  * data_start */
4181                 i = 0;
4182                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4183                         /* Erase the page */
4184                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4185                                 goto nvram_write_end;
4186
4187                         /* Re-enable the write again for the actual write */
4188                         bnx2_enable_nvram_write(bp);
4189
4190                         for (addr = page_start; addr < data_start;
4191                                 addr += 4, i += 4) {
4192
4193                                 rc = bnx2_nvram_write_dword(bp, addr,
4194                                         &flash_buffer[i], cmd_flags);
4195
4196                                 if (rc != 0)
4197                                         goto nvram_write_end;
4198
4199                                 cmd_flags = 0;
4200                         }
4201                 }
4202
4203                 /* Loop to write the new data from data_start to data_end */
4204                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4205                         if ((addr == page_end - 4) ||
4206                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4207                                  (addr == data_end - 4))) {
4208
4209                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4210                         }
4211                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4212                                 cmd_flags);
4213
4214                         if (rc != 0)
4215                                 goto nvram_write_end;
4216
4217                         cmd_flags = 0;
4218                         buf += 4;
4219                 }
4220
4221                 /* Loop to write back the buffer data from data_end
4222                  * to page_end */
4223                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4224                         for (addr = data_end; addr < page_end;
4225                                 addr += 4, i += 4) {
4226
4227                                 if (addr == page_end-4) {
4228                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4229                                 }
4230                                 rc = bnx2_nvram_write_dword(bp, addr,
4231                                         &flash_buffer[i], cmd_flags);
4232
4233                                 if (rc != 0)
4234                                         goto nvram_write_end;
4235
4236                                 cmd_flags = 0;
4237                         }
4238                 }
4239
4240                 /* Disable writes to flash interface (lock write-protect) */
4241                 bnx2_disable_nvram_write(bp);
4242
4243                 /* Disable access to flash interface */
4244                 bnx2_disable_nvram_access(bp);
4245                 bnx2_release_nvram_lock(bp);
4246
4247                 /* Increment written */
4248                 written += data_end - data_start;
4249         }
4250
4251 nvram_write_end:
4252         kfree(flash_buffer);
4253         kfree(align_buf);
4254         return rc;
4255 }
4256
/* Detect whether the bootcode firmware manages the SerDes PHY on the
 * driver's behalf ("remote PHY").  If the capability is advertised,
 * record it in bp->phy_flags, derive the current port type (fibre vs.
 * twisted pair) from the firmware link status, and - when the interface
 * is up - acknowledge the capability back to the firmware via the
 * driver ack mailbox.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	/* Remote PHY only applies to SerDes devices. */
	if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
		return;

	/* The capability mailbox is only valid if its signature matches. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* The firmware owns the link; mirror its reported media. */
		val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Tell the firmware we accept remote PHY mode. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
		}
	}
}
4288
/* Map the chip's MSI-X vector table and pending-bit array (PBA) into
 * host-visible GRC windows 2 and 3, after switching the GRC window
 * register into separate-window mode.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4297
/* Reset the controller core and return it to a known state.
 *
 * Sequence: quiesce DMA, handshake with the bootcode firmware
 * (bnx2_fw_sync), deposit a driver-reset signature in shared memory so
 * the firmware treats this as a soft reset, issue the chip-specific
 * reset (5709 uses a MISC_COMMAND write; older chips use PCICFG), then
 * wait for reset completion and for the firmware to finish its own
 * re-initialization.  Also re-detects remote-PHY mode under phy_lock.
 *
 * Returns 0 on success or a negative errno (-EBUSY, -ENODEV, or an
 * error propagated from bnx2_fw_sync()/bnx2_alloc_bad_rbuf()).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset.
	 * NOTE(review): trailing 1, 1 appear to be the ack/silent flags
	 * of bnx2_fw_sync() - confirm against its definition. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via MISC_COMMAND, then restore the config
		 * register the reset clobbers. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Reset may have changed remote-PHY state; re-detect it and
	 * reprogram the default link if the port type changed. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the MSI-X window mappings; restore them. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4403
/* Program the chip after a reset: DMA configuration, context memory,
 * on-chip CPUs/firmware, MAC address, MTU, host-coalescing (HC) status
 * and statistics blocks, per-vector coalescing parameters, and the RX
 * filter.  Finishes with a WAIT2 firmware handshake and enables the
 * default set of chip engines.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* A0 workaround: restrict TDMA to a single DMA. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 errata workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff algorithm from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA address of the host status block. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	/* DMA address of the host statistics block. */
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds for the default vector. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		/* Multiple vectors: enable the MSI-X bit vector and use
		 * 128-byte spaced status blocks. */
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing config for vectors 1..n-1 (vector 0 was
	 * programmed above via the global HC registers). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake: tell the firmware initialization is complete.
	 * NOTE(review): trailing 1, 0 appear to be the ack/silent flags
	 * of bnx2_fw_sync() - confirm against its definition. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4614
4615 static void
4616 bnx2_clear_ring_states(struct bnx2 *bp)
4617 {
4618         struct bnx2_napi *bnapi;
4619         struct bnx2_tx_ring_info *txr;
4620         struct bnx2_rx_ring_info *rxr;
4621         int i;
4622
4623         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4624                 bnapi = &bp->bnx2_napi[i];
4625                 txr = &bnapi->tx_ring;
4626                 rxr = &bnapi->rx_ring;
4627
4628                 txr->tx_cons = 0;
4629                 txr->hw_tx_cons = 0;
4630                 rxr->rx_prod_bseq = 0;
4631                 rxr->rx_prod = 0;
4632                 rxr->rx_cons = 0;
4633                 rxr->rx_pg_prod = 0;
4634                 rxr->rx_pg_cons = 0;
4635         }
4636 }
4637
4638 static void
4639 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4640 {
4641         u32 val, offset0, offset1, offset2, offset3;
4642         u32 cid_addr = GET_CID_ADDR(cid);
4643
4644         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4645                 offset0 = BNX2_L2CTX_TYPE_XI;
4646                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4647                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4648                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4649         } else {
4650                 offset0 = BNX2_L2CTX_TYPE;
4651                 offset1 = BNX2_L2CTX_CMD_TYPE;
4652                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4653                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4654         }
4655         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4656         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4657
4658         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4659         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4660
4661         val = (u64) txr->tx_desc_mapping >> 32;
4662         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4663
4664         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4665         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4666 }
4667
4668 static void
4669 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4670 {
4671         struct tx_bd *txbd;
4672         u32 cid = TX_CID;
4673         struct bnx2_napi *bnapi;
4674         struct bnx2_tx_ring_info *txr;
4675
4676         bnapi = &bp->bnx2_napi[ring_num];
4677         txr = &bnapi->tx_ring;
4678
4679         if (ring_num == 0)
4680                 cid = TX_CID;
4681         else
4682                 cid = TX_TSS_CID + ring_num - 1;
4683
4684         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4685
4686         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4687
4688         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4689         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4690
4691         txr->tx_prod = 0;
4692         txr->tx_prod_bseq = 0;
4693
4694         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4695         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4696
4697         bnx2_init_tx_context(bp, cid, txr);
4698 }
4699
4700 static void
4701 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4702                      int num_rings)
4703 {
4704         int i;
4705         struct rx_bd *rxbd;
4706
4707         for (i = 0; i < num_rings; i++) {
4708                 int j;
4709
4710                 rxbd = &rx_ring[i][0];
4711                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4712                         rxbd->rx_bd_len = buf_size;
4713                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4714                 }
4715                 if (i == (num_rings - 1))
4716                         j = 0;
4717                 else
4718                         j = i + 1;
4719                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4720                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4721         }
4722 }
4723
/* Set up one RX ring: initialize its BD pages, program the chip
 * context (including the optional jumbo page ring), pre-fill the ring
 * with page and skb buffers, and publish the initial producer indices
 * to the hardware mailboxes.  Ring 0 uses RX_CID; additional RSS rings
 * use consecutive RX_RSS_CIDs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring (overwritten below if jumbo is in use). */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the secondary page-buffer ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base address of the first RX BD page, high then low dword. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used to publish new producer indices. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Hand the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4803
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS and RSS steering configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the RSS indirection table: entries cycle over the
		 * non-default rings.  Four one-byte entries are packed into
		 * tbl_32 and flushed to the RXP scratchpad (big-endian)
		 * every fourth iteration.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4848
4849 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4850 {
4851         u32 max, num_rings = 1;
4852
4853         while (ring_size > MAX_RX_DESC_CNT) {
4854                 ring_size -= MAX_RX_DESC_CNT;
4855                 num_rings++;
4856         }
4857         /* round to next power of 2 */
4858         max = max_size;
4859         while ((max & num_rings) == 0)
4860                 max >>= 1;
4861
4862         if (num_rings != max)
4863                 max <<= 1;
4864
4865         return max;
4866 }
4867
4868 static void
4869 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4870 {
4871         u32 rx_size, rx_space, jumbo_size;
4872
4873         /* 8 for CRC and VLAN */
4874         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4875
4876         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4877                 sizeof(struct skb_shared_info);
4878
4879         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4880         bp->rx_pg_ring_size = 0;
4881         bp->rx_max_pg_ring = 0;
4882         bp->rx_max_pg_ring_idx = 0;
4883         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4884                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4885
4886                 jumbo_size = size * pages;
4887                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4888                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4889
4890                 bp->rx_pg_ring_size = jumbo_size;
4891                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4892                                                         MAX_RX_PG_RINGS);
4893                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4894                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4895                 bp->rx_copy_thresh = 0;
4896         }
4897
4898         bp->rx_buf_use_size = rx_size;
4899         /* hw alignment */
4900         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4901         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4902         bp->rx_ring_size = size;
4903         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4904         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4905 }
4906
4907 static void
4908 bnx2_free_tx_skbs(struct bnx2 *bp)
4909 {
4910         int i;
4911
4912         for (i = 0; i < bp->num_tx_rings; i++) {
4913                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4914                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4915                 int j;
4916
4917                 if (txr->tx_buf_ring == NULL)
4918                         continue;
4919
4920                 for (j = 0; j < TX_DESC_CNT; ) {
4921                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4922                         struct sk_buff *skb = tx_buf->skb;
4923                         int k, last;
4924
4925                         if (skb == NULL) {
4926                                 j++;
4927                                 continue;
4928                         }
4929
4930                         pci_unmap_single(bp->pdev,
4931                                          pci_unmap_addr(tx_buf, mapping),
4932                         skb_headlen(skb), PCI_DMA_TODEVICE);
4933
4934                         tx_buf->skb = NULL;
4935
4936                         last = skb_shinfo(skb)->nr_frags;
4937                         for (k = 0; k < last; k++) {
4938                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4939                                 pci_unmap_page(bp->pdev,
4940                                         pci_unmap_addr(tx_buf, mapping),
4941                                         skb_shinfo(skb)->frags[j].size,
4942                                         PCI_DMA_TODEVICE);
4943                         }
4944                         dev_kfree_skb(skb);
4945                         j += k + 1;
4946                 }
4947         }
4948 }
4949
4950 static void
4951 bnx2_free_rx_skbs(struct bnx2 *bp)
4952 {
4953         int i;
4954
4955         for (i = 0; i < bp->num_rx_rings; i++) {
4956                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4957                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4958                 int j;
4959
4960                 if (rxr->rx_buf_ring == NULL)
4961                         return;
4962
4963                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4964                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4965                         struct sk_buff *skb = rx_buf->skb;
4966
4967                         if (skb == NULL)
4968                                 continue;
4969
4970                         pci_unmap_single(bp->pdev,
4971                                          pci_unmap_addr(rx_buf, mapping),
4972                                          bp->rx_buf_use_size,
4973                                          PCI_DMA_FROMDEVICE);
4974
4975                         rx_buf->skb = NULL;
4976
4977                         dev_kfree_skb(skb);
4978                 }
4979                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4980                         bnx2_free_rx_page(bp, rxr, j);
4981         }
4982 }
4983
/* Free all skbs held by both the TX and RX rings.  Callers quiesce the
 * chip first (e.g. bnx2_reset_nic() calls this right after
 * bnx2_reset_chip()).
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4990
4991 static int
4992 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4993 {
4994         int rc;
4995
4996         rc = bnx2_reset_chip(bp, reset_code);
4997         bnx2_free_skbs(bp);
4998         if (rc)
4999                 return rc;
5000
5001         if ((rc = bnx2_init_chip(bp)) != 0)
5002                 return rc;
5003
5004         bnx2_init_all_rings(bp);
5005         return 0;
5006 }
5007
5008 static int
5009 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5010 {
5011         int rc;
5012
5013         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5014                 return rc;
5015
5016         spin_lock_bh(&bp->phy_lock);
5017         bnx2_init_phy(bp, reset_phy);
5018         bnx2_set_link(bp);
5019         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5020                 bnx2_remote_phy_event(bp);
5021         spin_unlock_bh(&bp->phy_lock);
5022         return 0;
5023 }
5024
/* Ethtool register self-test.  For each table entry, probe the register
 * at @offset: writing 0 must clear all read/write bits (rw_mask),
 * writing all-ones must set them, and the read-only bits (ro_mask) must
 * keep their original value throughout.  The original register value is
 * restored after each probe.  Returns 0 on success, -ENODEV on the
 * first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* Sentinel entry terminating the table. */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                /* Skip entries that do not exist on the 5709. */
                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Writing 0 must clear every writable bit ... */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                /* ... while read-only bits keep their saved value. */
                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Writing all-ones must set every writable bit. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the register before reporting failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
5195
5196 static int
5197 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5198 {
5199         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5200                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5201         int i;
5202
5203         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5204                 u32 offset;
5205
5206                 for (offset = 0; offset < size; offset += 4) {
5207
5208                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5209
5210                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5211                                 test_pattern[i]) {
5212                                 return -ENODEV;
5213                         }
5214                 }
5215         }
5216         return 0;
5217 }
5218
5219 static int
5220 bnx2_test_memory(struct bnx2 *bp)
5221 {
5222         int ret = 0;
5223         int i;
5224         static struct mem_entry {
5225                 u32   offset;
5226                 u32   len;
5227         } mem_tbl_5706[] = {
5228                 { 0x60000,  0x4000 },
5229                 { 0xa0000,  0x3000 },
5230                 { 0xe0000,  0x4000 },
5231                 { 0x120000, 0x4000 },
5232                 { 0x1a0000, 0x4000 },
5233                 { 0x160000, 0x4000 },
5234                 { 0xffffffff, 0    },
5235         },
5236         mem_tbl_5709[] = {
5237                 { 0x60000,  0x4000 },
5238                 { 0xa0000,  0x3000 },
5239                 { 0xe0000,  0x4000 },
5240                 { 0x120000, 0x4000 },
5241                 { 0x1a0000, 0x4000 },
5242                 { 0xffffffff, 0    },
5243         };
5244         struct mem_entry *mem_tbl;
5245
5246         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5247                 mem_tbl = mem_tbl_5709;
5248         else
5249                 mem_tbl = mem_tbl_5706;
5250
5251         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5252                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5253                         mem_tbl[i].len)) != 0) {
5254                         return ret;
5255                 }
5256         }
5257
5258         return ret;
5259 }
5260
5261 #define BNX2_MAC_LOOPBACK       0
5262 #define BNX2_PHY_LOOPBACK       1
5263
/* Loopback self-test: transmit one self-addressed packet with the MAC
 * (or PHY) placed in loopback and verify that it is received back
 * intact.  Returns 0 on success, -ENODEV on any failure, -EINVAL for an
 * unknown @loopback_mode, -ENOMEM if the test skb cannot be allocated.
 * PHY loopback is skipped (returns 0) when a remote PHY manages link.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* The test always runs on vector 0's rings. */
        tx_napi = bnapi;

        txr = &tx_napi->tx_ring;
        rxr = &bnapi->rx_ring;
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a test frame: our own MAC as destination, zeroed
         * source/type, then an incrementing byte pattern we can verify
         * on receive.  Capped so it fits in a normal (non-page) RX buf.
         */
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a status-block update so we get a fresh RX consumer
         * index before transmitting.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

        num_pkts = 0;

        /* Post a single TX BD describing the whole frame. */
        txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
        txr->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell. */
        REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        /* Give the looped-back packet time to complete, then force
         * another status-block update to publish the new indices.
         */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The single TX BD must have been consumed ... */
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
                goto loopback_test_done;

        /* ... and exactly num_pkts packets must have arrived. */
        rx_idx = bnx2_get_hw_rx_cons(bnapi);
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The frame header written by the chip precedes the packet data
         * in the RX buffer.
         */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, BNX2_RX_OFFSET);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Any receive error flagged by the chip fails the test. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: received length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload byte pattern survived the round trip. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
5391
5392 #define BNX2_MAC_LOOPBACK_FAILED        1
5393 #define BNX2_PHY_LOOPBACK_FAILED        2
5394 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5395                                          BNX2_PHY_LOOPBACK_FAILED)
5396
5397 static int
5398 bnx2_test_loopback(struct bnx2 *bp)
5399 {
5400         int rc = 0;
5401
5402         if (!netif_running(bp->dev))
5403                 return BNX2_LOOPBACK_FAILED;
5404
5405         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5406         spin_lock_bh(&bp->phy_lock);
5407         bnx2_init_phy(bp, 1);
5408         spin_unlock_bh(&bp->phy_lock);
5409         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5410                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5411         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5412                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5413         return rc;
5414 }
5415
5416 #define NVRAM_SIZE 0x200
5417 #define CRC32_RESIDUAL 0xdebb20e3
5418
5419 static int
5420 bnx2_test_nvram(struct bnx2 *bp)
5421 {
5422         __be32 buf[NVRAM_SIZE / 4];
5423         u8 *data = (u8 *) buf;
5424         int rc = 0;
5425         u32 magic, csum;
5426
5427         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5428                 goto test_nvram_done;
5429
5430         magic = be32_to_cpu(buf[0]);
5431         if (magic != 0x669955aa) {
5432                 rc = -ENODEV;
5433                 goto test_nvram_done;
5434         }
5435
5436         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5437                 goto test_nvram_done;
5438
5439         csum = ether_crc_le(0x100, data);
5440         if (csum != CRC32_RESIDUAL) {
5441                 rc = -ENODEV;
5442                 goto test_nvram_done;
5443         }
5444
5445         csum = ether_crc_le(0x100, data + 0x100);
5446         if (csum != CRC32_RESIDUAL) {
5447                 rc = -ENODEV;
5448         }
5449
5450 test_nvram_done:
5451         return rc;
5452 }
5453
5454 static int
5455 bnx2_test_link(struct bnx2 *bp)
5456 {
5457         u32 bmsr;
5458
5459         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5460                 if (bp->link_up)
5461                         return 0;
5462                 return -ENODEV;
5463         }
5464         spin_lock_bh(&bp->phy_lock);
5465         bnx2_enable_bmsr1(bp);
5466         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5467         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5468         bnx2_disable_bmsr1(bp);
5469         spin_unlock_bh(&bp->phy_lock);
5470
5471         if (bmsr & BMSR_LSTATUS) {
5472                 return 0;
5473         }
5474         return -ENODEV;
5475 }
5476
5477 static int
5478 bnx2_test_intr(struct bnx2 *bp)
5479 {
5480         int i;
5481         u16 status_idx;
5482
5483         if (!netif_running(bp->dev))
5484                 return -ENODEV;
5485
5486         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5487
5488         /* This register is not touched during run-time. */
5489         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5490         REG_RD(bp, BNX2_HC_COMMAND);
5491
5492         for (i = 0; i < 10; i++) {
5493                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5494                         status_idx) {
5495
5496                         break;
5497                 }
5498
5499                 msleep_interruptible(10);
5500         }
5501         if (i < 10)
5502                 return 0;
5503
5504         return -ENODEV;
5505 }
5506
/* Determining link for parallel detection.  Returns 1 when the 5706
 * SerDes shows a usable signal with no sync/decode problems and is not
 * receiving autoneg CONFIG words, 0 otherwise (or when parallel
 * detection is disabled).  Called with bp->phy_lock held by the serdes
 * timer.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* Signal detect must be present. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Read twice — the first read may return stale latched status. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        /* Double read for the same latched-status reason. */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5538
/* Periodic link maintenance for the 5706 SerDes, run from bnx2_timer().
 * Implements parallel detection: when autoneg finds no partner but a
 * link signal is present, force 1G full duplex; when the partner later
 * advertises autoneg, re-enable it.  Also detects loss of sync on an
 * up link and forces the link down so bnx2_set_link() can recover.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* An autoneg restart is still settling; skip this tick. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* No link via autoneg: if the wire shows a good
                         * signal, parallel-detect by forcing 1G/full.
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link came up via parallel detection; if the partner now
                 * signals autoneg ability, switch back to autoneg.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = bp->timer_interval;

        if (check_link) {
                u32 val;

                /* Double read: first read may return stale latched bits. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link reported up but sync lost: force it down
                         * once, then let bnx2_set_link() re-evaluate.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
5600
/* Periodic link maintenance for the 5708 SerDes, run from bnx2_timer().
 * On 2.5G-capable parts with autoneg and no link, alternates between
 * forced 2.5G mode and autoneg each interval until link is found.
 * No-op when a remote PHY manages the link.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Still waiting out a previous autoneg restart. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg found nothing: try forced 2.5G with a
                         * shorter retry interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G found nothing: fall back to autoneg
                         * and give it two timer ticks to complete.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
5633
/* Driver heartbeat timer.  Sends the firmware keep-alive, refreshes the
 * firmware RX drop counter, works around a 5708 statistics issue, and
 * runs the per-chip SerDes link state machine.  Re-arms itself with
 * bp->current_interval (which the serdes handlers may change).  Skips
 * the body while interrupts are disabled via bp->intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Device is being reset (intr_sem held); just re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5665
5666 static int
5667 bnx2_request_irq(struct bnx2 *bp)
5668 {
5669         unsigned long flags;
5670         struct bnx2_irq *irq;
5671         int rc = 0, i;
5672
5673         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5674                 flags = 0;
5675         else
5676                 flags = IRQF_SHARED;
5677
5678         for (i = 0; i < bp->irq_nvecs; i++) {
5679                 irq = &bp->irq_tbl[i];
5680                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5681                                  &bp->bnx2_napi[i]);
5682                 if (rc)
5683                         break;
5684                 irq->requested = 1;
5685         }
5686         return rc;
5687 }
5688
5689 static void
5690 bnx2_free_irq(struct bnx2 *bp)
5691 {
5692         struct bnx2_irq *irq;
5693         int i;
5694
5695         for (i = 0; i < bp->irq_nvecs; i++) {
5696                 irq = &bp->irq_tbl[i];
5697                 if (irq->requested)
5698                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5699                 irq->requested = 0;
5700         }
5701         if (bp->flags & BNX2_FLAG_USING_MSI)
5702                 pci_disable_msi(bp->pdev);
5703         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5704                 pci_disable_msix(bp->pdev);
5705
5706         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5707 }
5708
5709 static void
5710 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5711 {
5712         int i, rc;
5713         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5714
5715         bnx2_setup_msix_tbl(bp);
5716         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5717         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5718         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5719
5720         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5721                 msix_ent[i].entry = i;
5722                 msix_ent[i].vector = 0;
5723
5724                 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5725                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5726         }
5727
5728         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5729         if (rc != 0)
5730                 return;
5731
5732         bp->irq_nvecs = msix_vecs;
5733         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5734         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5735                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5736 }
5737
5738 static void
5739 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5740 {
5741         int cpus = num_online_cpus();
5742         int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5743
5744         bp->irq_tbl[0].handler = bnx2_interrupt;
5745         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5746         bp->irq_nvecs = 1;
5747         bp->irq_tbl[0].vector = bp->pdev->irq;
5748
5749         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5750                 bnx2_enable_msix(bp, msix_vecs);
5751
5752         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5753             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5754                 if (pci_enable_msi(bp->pdev) == 0) {
5755                         bp->flags |= BNX2_FLAG_USING_MSI;
5756                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5757                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5758                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5759                         } else
5760                                 bp->irq_tbl[0].handler = bnx2_msi;
5761
5762                         bp->irq_tbl[0].vector = bp->pdev->irq;
5763                 }
5764         }
5765         bp->num_tx_rings = 1;
5766         bp->num_rx_rings = bp->irq_nvecs;
5767 }
5768
/* Called with rtnl_lock */
/* net_device open handler: power up the chip, choose an interrupt mode,
 * allocate memory, request IRQs and initialize the NIC.  If MSI was
 * chosen, verify an interrupt actually arrives and fall back to INTx
 * when it does not.  Returns 0 on success or a negative errno after
 * tearing everything back down.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Force INTx mode and reinitialize from scratch. */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_start_queue(dev);

        return 0;

open_err:
        /* Unwind everything allocated above; safe on partial setup. */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
5845
5846 static void
5847 bnx2_reset_task(struct work_struct *work)
5848 {
5849         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5850
5851         if (!netif_running(bp->dev))
5852                 return;
5853
5854         bnx2_netif_stop(bp);
5855
5856         bnx2_init_nic(bp, 1);
5857
5858         atomic_set(&bp->intr_sem, 1);
5859         bnx2_netif_start(bp);
5860 }
5861
/* Transmit timeout handler: defer recovery to process context via
 * bp->reset_task (see bnx2_reset_task above).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
5870
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install the new VLAN group.  The NIC is quiesced while the pointer
 * is swapped and the RX mode is reprogrammed, then restarted.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
#endif
5886
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start transmit: map the skb (head + page frags) into a chain of
 * TX buffer descriptors, encode checksum/VLAN/LSO flags, and ring the
 * doorbell registers.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the
 * ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

        /* The queue should have been stopped before the ring filled up;
         * reaching this branch indicates a flow-control bug.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Offload the TCP/UDP checksum to the chip. */
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                /* VLAN tag rides in the upper 16 bits of the flags word. */
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* IPv6 LSO: the extra TCP header offset beyond a
                         * bare ipv6hdr is encoded in 8-byte units, split
                         * across the OFF0/OFF4 flag fields and the mss
                         * word (OFF2 bits).  A zero offset just clears
                         * the OFF0 mask.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 LSO: the header will be rewritten, so a
                         * cloned header must be made private first.
                         */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Pre-compute the pseudo-header checksum and the
                         * per-segment tot_len for the chip.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        /* IP and/or TCP options: pass the combined option
                         * length (in 32-bit words) in bits 8+.
                         */
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* NOTE(review): DMA mapping results are not checked for errors
         * on this kernel vintage.
         */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        /* First BD covers the linear head and carries the START flag. */
        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One additional BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Last BD in the chain gets the END flag. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when a worst-case skb no longer fits; re-check
         * afterwards to close the race with a concurrent bnx2_tx_int().
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
6028
6029 /* Called with rtnl_lock */
6030 static int
6031 bnx2_close(struct net_device *dev)
6032 {
6033         struct bnx2 *bp = netdev_priv(dev);
6034         u32 reset_code;
6035
6036         cancel_work_sync(&bp->reset_task);
6037
6038         bnx2_disable_int_sync(bp);
6039         bnx2_napi_disable(bp);
6040         del_timer_sync(&bp->timer);
6041         if (bp->flags & BNX2_FLAG_NO_WOL)
6042                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6043         else if (bp->wol)
6044                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6045         else
6046                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6047         bnx2_reset_chip(bp, reset_code);
6048         bnx2_free_irq(bp);
6049         bnx2_free_skbs(bp);
6050         bnx2_free_mem(bp);
6051         bp->link_up = 0;
6052         netif_carrier_off(bp->dev);
6053         bnx2_set_power_state(bp, PCI_D3hot);
6054         return 0;
6055 }
6056
/* Combine a split 64-bit hardware counter (ctr_hi/ctr_lo) into an
 * unsigned long.  The whole expansion is parenthesized so the macro is
 * safe in any expression context (unary minus, multiplication, etc.).
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) ((unsigned long) (ctr##_hi) << 32) +  \
          (unsigned long) (ctr##_lo)))

/* On 32-bit, only the low half fits in an unsigned long. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6069
/* net_device get_stats hook: translate the DMA-updated hardware
 * statistics block into struct net_device_stats.  Returns the cached
 * (possibly stale) counters if the stats block is not allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* GET_NET_STATS merges the hi/lo counter halves (64-bit hosts
         * only; 32-bit hosts see the low word).
         */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors are reported as 0 on 5706 and 5708 A0;
         * presumably the counter is unreliable there - hardware erratum,
         * TODO confirm against Broadcom documentation.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include frames the firmware dropped, not just MBUF discards. */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
6145
6146 /* All ethtool functions called with rtnl_lock */
6147
/* ethtool get_settings: report supported modes for the active media
 * (serdes/copper, or both when a remote-PHY-capable device can switch),
 * plus the negotiated speed/duplex.  Link fields are read under
 * phy_lock for consistency.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                /* Remote PHY can drive either media type. */
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        /* Speed/duplex are only meaningful while the link is up;
         * -1 signals "unknown" to ethtool otherwise.
         */
        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6206
/* ethtool set_settings: validate and apply the requested port,
 * autoneg, advertised modes, or forced speed/duplex.  All checks and
 * the final bnx2_setup_phy() run under phy_lock.  Returns 0 on
 * success, -EINVAL for any invalid combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on local copies so nothing is committed until the whole
         * request has been validated.
         */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching ports is only possible with a remote-PHY-capable
         * device.
         */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 advertising is copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G needs a capable PHY and a fibre port. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the port
                         * type supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: fibre accepts only 1000/2500 full duplex
                 * (2500 when 2.5G capable); copper rejects 1000/2500.
                 */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6301
6302 static void
6303 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6304 {
6305         struct bnx2 *bp = netdev_priv(dev);
6306
6307         strcpy(info->driver, DRV_MODULE_NAME);
6308         strcpy(info->version, DRV_MODULE_VERSION);
6309         strcpy(info->bus_info, pci_name(bp->pdev));
6310         strcpy(info->fw_version, bp->fw_version);
6311 }
6312
/* Size of the ethtool register dump buffer (see bnx2_get_regs). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: buffer size the caller must provide for
 * bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6320
6321 static void
6322 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6323 {
6324         u32 *p = _p, i, offset;
6325         u8 *orig_p = _p;
6326         struct bnx2 *bp = netdev_priv(dev);
6327         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6328                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6329                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6330                                  0x1040, 0x1048, 0x1080, 0x10a4,
6331                                  0x1400, 0x1490, 0x1498, 0x14f0,
6332                                  0x1500, 0x155c, 0x1580, 0x15dc,
6333                                  0x1600, 0x1658, 0x1680, 0x16d8,
6334                                  0x1800, 0x1820, 0x1840, 0x1854,
6335                                  0x1880, 0x1894, 0x1900, 0x1984,
6336                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6337                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6338                                  0x2000, 0x2030, 0x23c0, 0x2400,
6339                                  0x2800, 0x2820, 0x2830, 0x2850,
6340                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6341                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6342                                  0x4080, 0x4090, 0x43c0, 0x4458,
6343                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6344                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6345                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6346                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6347                                  0x6800, 0x6848, 0x684c, 0x6860,
6348                                  0x6888, 0x6910, 0x8000 };
6349
6350         regs->version = 0;
6351
6352         memset(p, 0, BNX2_REGDUMP_LEN);
6353
6354         if (!netif_running(bp->dev))
6355                 return;
6356
6357         i = 0;
6358         offset = reg_boundaries[0];
6359         p += offset;
6360         while (offset < BNX2_REGDUMP_LEN) {
6361                 *p++ = REG_RD(bp, offset);
6362                 offset += 4;
6363                 if (offset == reg_boundaries[i + 1]) {
6364                         offset = reg_boundaries[i + 2];
6365                         p = (u32 *) (orig_p + offset);
6366                         i += 2;
6367                 }
6368         }
6369 }
6370
6371 static void
6372 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6373 {
6374         struct bnx2 *bp = netdev_priv(dev);
6375
6376         if (bp->flags & BNX2_FLAG_NO_WOL) {
6377                 wol->supported = 0;
6378                 wol->wolopts = 0;
6379         }
6380         else {
6381                 wol->supported = WAKE_MAGIC;
6382                 if (bp->wol)
6383                         wol->wolopts = WAKE_MAGIC;
6384                 else
6385                         wol->wolopts = 0;
6386         }
6387         memset(&wol->sopass, 0, sizeof(wol->sopass));
6388 }
6389
6390 static int
6391 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6392 {
6393         struct bnx2 *bp = netdev_priv(dev);
6394
6395         if (wol->wolopts & ~WAKE_MAGIC)
6396                 return -EINVAL;
6397
6398         if (wol->wolopts & WAKE_MAGIC) {
6399                 if (bp->flags & BNX2_FLAG_NO_WOL)
6400                         return -EINVAL;
6401
6402                 bp->wol = 1;
6403         }
6404         else {
6405                 bp->wol = 0;
6406         }
6407         return 0;
6408 }
6409
/* ethtool nway_reset: restart autonegotiation.  Fails unless autoneg
 * is enabled.  For remote PHYs the restart is delegated to the remote
 * side; for serdes the link is first forced down via loopback so the
 * peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while the link settles; msleep() may
                 * not be called with a BH spinlock held.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handled by bp->timer. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6452
6453 static int
6454 bnx2_get_eeprom_len(struct net_device *dev)
6455 {
6456         struct bnx2 *bp = netdev_priv(dev);
6457
6458         if (bp->flash_info == NULL)
6459                 return 0;
6460
6461         return (int) bp->flash_size;
6462 }
6463
6464 static int
6465 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6466                 u8 *eebuf)
6467 {
6468         struct bnx2 *bp = netdev_priv(dev);
6469         int rc;
6470
6471         /* parameters already validated in ethtool_get_eeprom */
6472
6473         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6474
6475         return rc;
6476 }
6477
6478 static int
6479 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6480                 u8 *eebuf)
6481 {
6482         struct bnx2 *bp = netdev_priv(dev);
6483         int rc;
6484
6485         /* parameters already validated in ethtool_set_eeprom */
6486
6487         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6488
6489         return rc;
6490 }
6491
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (tick counts, quick-consumer trip points, stats interval).
 * Always returns 0.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Zero first so unsupported fields read back as 0. */
        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
6513
6514 static int
6515 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6516 {
6517         struct bnx2 *bp = netdev_priv(dev);
6518
6519         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6520         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6521
6522         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6523         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6524
6525         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6526         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6527
6528         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6529         if (bp->rx_quick_cons_trip_int > 0xff)
6530                 bp->rx_quick_cons_trip_int = 0xff;
6531
6532         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6533         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6534
6535         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6536         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6537
6538         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6539         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6540
6541         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6542         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6543                 0xff;
6544
6545         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6546         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6547                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6548                         bp->stats_ticks = USEC_PER_SEC;
6549         }
6550         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6551                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6552         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6553
6554         if (netif_running(bp->dev)) {
6555                 bnx2_netif_stop(bp);
6556                 bnx2_init_nic(bp, 0);
6557                 bnx2_netif_start(bp);
6558         }
6559
6560         return 0;
6561 }
6562
6563 static void
6564 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6565 {
6566         struct bnx2 *bp = netdev_priv(dev);
6567
6568         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6569         ering->rx_mini_max_pending = 0;
6570         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6571
6572         ering->rx_pending = bp->rx_ring_size;
6573         ering->rx_mini_pending = 0;
6574         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6575
6576         ering->tx_max_pending = MAX_TX_DESC_CNT;
6577         ering->tx_pending = bp->tx_ring_size;
6578 }
6579
6580 static int
6581 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6582 {
6583         if (netif_running(bp->dev)) {
6584                 bnx2_netif_stop(bp);
6585                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6586                 bnx2_free_skbs(bp);
6587                 bnx2_free_mem(bp);
6588         }
6589
6590         bnx2_set_rx_ring_size(bp, rx);
6591         bp->tx_ring_size = tx;
6592
6593         if (netif_running(bp->dev)) {
6594                 int rc;
6595
6596                 rc = bnx2_alloc_mem(bp);
6597                 if (rc)
6598                         return rc;
6599                 bnx2_init_nic(bp, 0);
6600                 bnx2_netif_start(bp);
6601         }
6602         return 0;
6603 }
6604
6605 static int
6606 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6607 {
6608         struct bnx2 *bp = netdev_priv(dev);
6609         int rc;
6610
6611         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6612                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6613                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6614
6615                 return -EINVAL;
6616         }
6617         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6618         return rc;
6619 }
6620
6621 static void
6622 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6623 {
6624         struct bnx2 *bp = netdev_priv(dev);
6625
6626         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6627         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6628         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6629 }
6630
6631 static int
6632 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6633 {
6634         struct bnx2 *bp = netdev_priv(dev);
6635
6636         bp->req_flow_ctrl = 0;
6637         if (epause->rx_pause)
6638                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6639         if (epause->tx_pause)
6640                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6641
6642         if (epause->autoneg) {
6643                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6644         }
6645         else {
6646                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6647         }
6648
6649         spin_lock_bh(&bp->phy_lock);
6650
6651         bnx2_setup_phy(bp, bp->phy_port);
6652
6653         spin_unlock_bh(&bp->phy_lock);
6654
6655         return 0;
6656 }
6657
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
6665
/* ethtool set_rx_csum: record the RX checksum offload setting
 * (presumably consumed by the RX path elsewhere in this file — not
 * visible in this chunk).  Always returns 0.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
6674
6675 static int
6676 bnx2_set_tso(struct net_device *dev, u32 data)
6677 {
6678         struct bnx2 *bp = netdev_priv(dev);
6679
6680         if (data) {
6681                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6682                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6683                         dev->features |= NETIF_F_TSO6;
6684         } else
6685                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6686                                    NETIF_F_TSO_ECN);
6687         return 0;
6688 }
6689
#define BNX2_NUM_STATS 46

/* Statistics names reported for ETH_SS_STATS.  The order must match
 * bnx2_stats_offset_arr[] and the per-chip length arrays below, which
 * are indexed in parallel by bnx2_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6742
/* Convert a statistics_block field offset into a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offset of each counter in the hardware statistics block,
 * in the same order as bnx2_stats_str_arr[].  For 8-byte counters this
 * is the offset of the high word; the low word immediately follows.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6793
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Byte width (0, 4, or 8) of each counter on 5706 A0-A2 and 5708 A0;
 * a width of 0 makes bnx2_get_ethtool_stats() report the counter as 0.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6804
/* Counter widths for all later chip revisions; only the errata-affected
 * stat_IfHCInBadOctets counter is skipped (width 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6812
#define BNX2_NUM_TESTS 6

/* Self-test names for ETH_SS_TEST; indices match the buf[] slots filled
 * in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6825
6826 static int
6827 bnx2_get_sset_count(struct net_device *dev, int sset)
6828 {
6829         switch (sset) {
6830         case ETH_SS_TEST:
6831                 return BNX2_NUM_TESTS;
6832         case ETH_SS_STATS:
6833                 return BNX2_NUM_STATS;
6834         default:
6835                 return -EOPNOTSUPP;
6836         }
6837 }
6838
/* ethtool self_test: run the online tests, plus the offline register,
 * memory and loopback tests when ETH_TEST_FL_OFFLINE is requested.
 * Results go into buf[] (indices match bnx2_tests_str_arr), and
 * ETH_TEST_FL_FAILED is set on any failure.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the chip: stop
		 * traffic, put the chip into diagnostic mode, and free
		 * all queued skbs before touching registers and memory.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation: full re-init if the device is
		 * up, otherwise just a plain reset.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7 seconds after the reset) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6894
6895 static void
6896 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6897 {
6898         switch (stringset) {
6899         case ETH_SS_STATS:
6900                 memcpy(buf, bnx2_stats_str_arr,
6901                         sizeof(bnx2_stats_str_arr));
6902                 break;
6903         case ETH_SS_TEST:
6904                 memcpy(buf, bnx2_tests_str_arr,
6905                         sizeof(bnx2_tests_str_arr));
6906                 break;
6907         }
6908 }
6909
6910 static void
6911 bnx2_get_ethtool_stats(struct net_device *dev,
6912                 struct ethtool_stats *stats, u64 *buf)
6913 {
6914         struct bnx2 *bp = netdev_priv(dev);
6915         int i;
6916         u32 *hw_stats = (u32 *) bp->stats_blk;
6917         u8 *stats_len_arr = NULL;
6918
6919         if (hw_stats == NULL) {
6920                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6921                 return;
6922         }
6923
6924         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6925             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6926             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6927             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6928                 stats_len_arr = bnx2_5706_stats_len_arr;
6929         else
6930                 stats_len_arr = bnx2_5708_stats_len_arr;
6931
6932         for (i = 0; i < BNX2_NUM_STATS; i++) {
6933                 if (stats_len_arr[i] == 0) {
6934                         /* skip this counter */
6935                         buf[i] = 0;
6936                         continue;
6937                 }
6938                 if (stats_len_arr[i] == 4) {
6939                         /* 4-byte counter */
6940                         buf[i] = (u64)
6941                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6942                         continue;
6943                 }
6944                 /* 8-byte counter */
6945                 buf[i] = (((u64) *(hw_stats +
6946                                         bnx2_stats_offset_arr[i])) << 32) +
6947                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6948         }
6949 }
6950
6951 static int
6952 bnx2_phys_id(struct net_device *dev, u32 data)
6953 {
6954         struct bnx2 *bp = netdev_priv(dev);
6955         int i;
6956         u32 save;
6957
6958         if (data == 0)
6959                 data = 2;
6960
6961         save = REG_RD(bp, BNX2_MISC_CFG);
6962         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6963
6964         for (i = 0; i < (data * 2); i++) {
6965                 if ((i % 2) == 0) {
6966                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6967                 }
6968                 else {
6969                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6970                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6971                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6972                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6973                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6974                                 BNX2_EMAC_LED_TRAFFIC);
6975                 }
6976                 msleep_interruptible(500);
6977                 if (signal_pending(current))
6978                         break;
6979         }
6980         REG_WR(bp, BNX2_EMAC_LED, 0);
6981         REG_WR(bp, BNX2_MISC_CFG, save);
6982         return 0;
6983 }
6984
6985 static int
6986 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6987 {
6988         struct bnx2 *bp = netdev_priv(dev);
6989
6990         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6991                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6992         else
6993                 return (ethtool_op_set_tx_csum(dev, data));
6994 }
6995
/* ethtool operations supported by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7026
/* Called with rtnl_lock */
/* MII ioctl handler: SIOCGMIIPHY returns the PHY address, SIOCGMIIREG
 * reads a PHY register, SIOCSMIIREG writes one.  PHY access is refused
 * when a remote (management-firmware controlled) PHY is in use, and
 * requires the device to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all MDIO accesses. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7080
7081 /* Called with rtnl_lock */
7082 static int
7083 bnx2_change_mac_addr(struct net_device *dev, void *p)
7084 {
7085         struct sockaddr *addr = p;
7086         struct bnx2 *bp = netdev_priv(dev);
7087
7088         if (!is_valid_ether_addr(addr->sa_data))
7089                 return -EINVAL;
7090
7091         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7092         if (netif_running(dev))
7093                 bnx2_set_mac_addr(bp);
7094
7095         return 0;
7096 }
7097
7098 /* Called with rtnl_lock */
7099 static int
7100 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7101 {
7102         struct bnx2 *bp = netdev_priv(dev);
7103
7104         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7105                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7106                 return -EINVAL;
7107
7108         dev->mtu = new_mtu;
7109         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7110 }
7111
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler with the device IRQ
 * disabled so polling callers (e.g. netconsole) can make progress.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
7123
/* Determine whether a 5709 port is SerDes or copper, from the bond id
 * and (for dual-media parts) the PHY strap, and set
 * BNX2_PHY_FLAG_SERDES accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "C" parts are treated as copper, "S" parts as SerDes;
	 * anything else is a dual-media part decoded via the strap below.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Prefer the software override value when it is in effect. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-to-media mapping differs between PCI function 0 and
	 * function 1.  NOTE(review): the specific strap values appear to
	 * come from the 5709 datasheet — confirm there before changing.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7161
/* Detect the PCI/PCI-X bus type, speed and width from the chip's
 * misc-status and clock-control registers, setting bp->flags and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* On PCI-X, derive the nominal bus speed from the detected
		 * clock frequency; several detect codes share a speed.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz, from the M66EN pin. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7213
7214 static int __devinit
7215 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7216 {
7217         struct bnx2 *bp;
7218         unsigned long mem_len;
7219         int rc, i, j;
7220         u32 reg;
7221         u64 dma_mask, persist_dma_mask;
7222
7223         SET_NETDEV_DEV(dev, &pdev->dev);
7224         bp = netdev_priv(dev);
7225
7226         bp->flags = 0;
7227         bp->phy_flags = 0;
7228
7229         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7230         rc = pci_enable_device(pdev);
7231         if (rc) {
7232                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7233                 goto err_out;
7234         }
7235
7236         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7237                 dev_err(&pdev->dev,
7238                         "Cannot find PCI device base address, aborting.\n");
7239                 rc = -ENODEV;
7240                 goto err_out_disable;
7241         }
7242
7243         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7244         if (rc) {
7245                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7246                 goto err_out_disable;
7247         }
7248
7249         pci_set_master(pdev);
7250         pci_save_state(pdev);
7251
7252         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7253         if (bp->pm_cap == 0) {
7254                 dev_err(&pdev->dev,
7255                         "Cannot find power management capability, aborting.\n");
7256                 rc = -EIO;
7257                 goto err_out_release;
7258         }
7259
7260         bp->dev = dev;
7261         bp->pdev = pdev;
7262
7263         spin_lock_init(&bp->phy_lock);
7264         spin_lock_init(&bp->indirect_lock);
7265         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7266
7267         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7268         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7269         dev->mem_end = dev->mem_start + mem_len;
7270         dev->irq = pdev->irq;
7271
7272         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7273
7274         if (!bp->regview) {
7275                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7276                 rc = -ENOMEM;
7277                 goto err_out_release;
7278         }
7279
7280         /* Configure byte swap and enable write to the reg_window registers.
7281          * Rely on CPU to do target byte swapping on big endian systems
7282          * The chip's target access swapping will not swap all accesses
7283          */
7284         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7285                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7286                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7287
7288         bnx2_set_power_state(bp, PCI_D0);
7289
7290         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7291
7292         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7293                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7294                         dev_err(&pdev->dev,
7295                                 "Cannot find PCIE capability, aborting.\n");
7296                         rc = -EIO;
7297                         goto err_out_unmap;
7298                 }
7299                 bp->flags |= BNX2_FLAG_PCIE;
7300                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7301                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7302         } else {
7303                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7304                 if (bp->pcix_cap == 0) {
7305                         dev_err(&pdev->dev,
7306                                 "Cannot find PCIX capability, aborting.\n");
7307                         rc = -EIO;
7308                         goto err_out_unmap;
7309                 }
7310         }
7311
7312         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7313                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7314                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7315         }
7316
7317         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7318                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7319                         bp->flags |= BNX2_FLAG_MSI_CAP;
7320         }
7321
7322         /* 5708 cannot support DMA addresses > 40-bit.  */
7323         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7324                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7325         else
7326                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7327
7328         /* Configure DMA attributes. */
7329         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7330                 dev->features |= NETIF_F_HIGHDMA;
7331                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7332                 if (rc) {
7333                         dev_err(&pdev->dev,
7334                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7335                         goto err_out_unmap;
7336                 }
7337         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7338                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7339                 goto err_out_unmap;
7340         }
7341
7342         if (!(bp->flags & BNX2_FLAG_PCIE))
7343                 bnx2_get_pci_speed(bp);
7344
7345         /* 5706A0 may falsely detect SERR and PERR. */
7346         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7347                 reg = REG_RD(bp, PCI_COMMAND);
7348                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7349                 REG_WR(bp, PCI_COMMAND, reg);
7350         }
7351         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7352                 !(bp->flags & BNX2_FLAG_PCIX)) {
7353
7354                 dev_err(&pdev->dev,
7355                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7356                 goto err_out_unmap;
7357         }
7358
7359         bnx2_init_nvram(bp);
7360
7361         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7362
7363         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7364             BNX2_SHM_HDR_SIGNATURE_SIG) {
7365                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7366
7367                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7368         } else
7369                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7370
7371         /* Get the permanent MAC address.  First we need to make sure the
7372          * firmware is actually running.
7373          */
7374         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7375
7376         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7377             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7378                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7379                 rc = -ENODEV;
7380                 goto err_out_unmap;
7381         }
7382
7383         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7384         for (i = 0, j = 0; i < 3; i++) {
7385                 u8 num, k, skip0;
7386
7387                 num = (u8) (reg >> (24 - (i * 8)));
7388                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7389                         if (num >= k || !skip0 || k == 1) {
7390                                 bp->fw_version[j++] = (num / k) + '0';
7391                                 skip0 = 0;
7392                         }
7393                 }
7394                 if (i != 2)
7395                         bp->fw_version[j++] = '.';
7396         }
7397         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7398         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7399                 bp->wol = 1;
7400
7401         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7402                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7403
7404                 for (i = 0; i < 30; i++) {
7405                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7406                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7407                                 break;
7408                         msleep(10);
7409                 }
7410         }
7411         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7412         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7413         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7414             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7415                 int i;
7416                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7417
7418                 bp->fw_version[j++] = ' ';
7419                 for (i = 0; i < 3; i++) {
7420                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7421                         reg = swab32(reg);
7422                         memcpy(&bp->fw_version[j], &reg, 4);
7423                         j += 4;
7424                 }
7425         }
7426
7427         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7428         bp->mac_addr[0] = (u8) (reg >> 8);
7429         bp->mac_addr[1] = (u8) reg;
7430
7431         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7432         bp->mac_addr[2] = (u8) (reg >> 24);
7433         bp->mac_addr[3] = (u8) (reg >> 16);
7434         bp->mac_addr[4] = (u8) (reg >> 8);
7435         bp->mac_addr[5] = (u8) reg;
7436
7437         bp->tx_ring_size = MAX_TX_DESC_CNT;
7438         bnx2_set_rx_ring_size(bp, 255);
7439
7440         bp->rx_csum = 1;
7441
7442         bp->tx_quick_cons_trip_int = 20;
7443         bp->tx_quick_cons_trip = 20;
7444         bp->tx_ticks_int = 80;
7445         bp->tx_ticks = 80;
7446
7447         bp->rx_quick_cons_trip_int = 6;
7448         bp->rx_quick_cons_trip = 6;
7449         bp->rx_ticks_int = 18;
7450         bp->rx_ticks = 18;
7451
7452         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7453
7454         bp->timer_interval =  HZ;
7455         bp->current_interval =  HZ;
7456
7457         bp->phy_addr = 1;
7458
7459         /* Disable WOL support if we are running on a SERDES chip. */
7460         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7461                 bnx2_get_5709_media(bp);
7462         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7463                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7464
7465         bp->phy_port = PORT_TP;
7466         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7467                 bp->phy_port = PORT_FIBRE;
7468                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7469                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7470                         bp->flags |= BNX2_FLAG_NO_WOL;
7471                         bp->wol = 0;
7472                 }
7473                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7474                         /* Don't do parallel detect on this board because of
7475                          * some board problems.  The link will not go down
7476                          * if we do parallel detect.
7477                          */
7478                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7479                             pdev->subsystem_device == 0x310c)
7480                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7481                 } else {
7482                         bp->phy_addr = 2;
7483                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7484                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7485                 }
7486                 bnx2_init_remote_phy(bp);
7487
7488         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7489                    CHIP_NUM(bp) == CHIP_NUM_5708)
7490                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7491         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7492                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7493                   CHIP_REV(bp) == CHIP_REV_Bx))
7494                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7495
7496         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7497             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7498             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7499                 bp->flags |= BNX2_FLAG_NO_WOL;
7500                 bp->wol = 0;
7501         }
7502
7503         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7504                 bp->tx_quick_cons_trip_int =
7505                         bp->tx_quick_cons_trip;
7506                 bp->tx_ticks_int = bp->tx_ticks;
7507                 bp->rx_quick_cons_trip_int =
7508                         bp->rx_quick_cons_trip;
7509                 bp->rx_ticks_int = bp->rx_ticks;
7510                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7511                 bp->com_ticks_int = bp->com_ticks;
7512                 bp->cmd_ticks_int = bp->cmd_ticks;
7513         }
7514
7515         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7516          *
7517          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7518          * with byte enables disabled on the unused 32-bit word.  This is legal
7519          * but causes problems on the AMD 8132 which will eventually stop
7520          * responding after a while.
7521          *
7522          * AMD believes this incompatibility is unique to the 5706, and
7523          * prefers to locally disable MSI rather than globally disabling it.
7524          */
7525         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7526                 struct pci_dev *amd_8132 = NULL;
7527
7528                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7529                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7530                                                   amd_8132))) {
7531
7532                         if (amd_8132->revision >= 0x10 &&
7533                             amd_8132->revision <= 0x13) {
7534                                 disable_msi = 1;
7535                                 pci_dev_put(amd_8132);
7536                                 break;
7537                         }
7538                 }
7539         }
7540
7541         bnx2_set_default_link(bp);
7542         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7543
7544         init_timer(&bp->timer);
7545         bp->timer.expires = RUN_AT(bp->timer_interval);
7546         bp->timer.data = (unsigned long) bp;
7547         bp->timer.function = bnx2_timer;
7548
7549         return 0;
7550
7551 err_out_unmap:
7552         if (bp->regview) {
7553                 iounmap(bp->regview);
7554                 bp->regview = NULL;
7555         }
7556
7557 err_out_release:
7558         pci_release_regions(pdev);
7559
7560 err_out_disable:
7561         pci_disable_device(pdev);
7562         pci_set_drvdata(pdev, NULL);
7563
7564 err_out:
7565         return rc;
7566 }
7567
7568 static char * __devinit
7569 bnx2_bus_string(struct bnx2 *bp, char *str)
7570 {
7571         char *s = str;
7572
7573         if (bp->flags & BNX2_FLAG_PCIE) {
7574                 s += sprintf(s, "PCI Express");
7575         } else {
7576                 s += sprintf(s, "PCI");
7577                 if (bp->flags & BNX2_FLAG_PCIX)
7578                         s += sprintf(s, "-X");
7579                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7580                         s += sprintf(s, " 32-bit");
7581                 else
7582                         s += sprintf(s, " 64-bit");
7583                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7584         }
7585         return str;
7586 }
7587
7588 static void __devinit
7589 bnx2_init_napi(struct bnx2 *bp)
7590 {
7591         int i;
7592
7593         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7594                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7595                 int (*poll)(struct napi_struct *, int);
7596
7597                 if (i == 0)
7598                         poll = bnx2_poll;
7599                 else
7600                         poll = bnx2_poll_msix;
7601
7602                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7603                 bnapi->bp = bp;
7604         }
7605 }
7606
7607 static int __devinit
7608 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7609 {
7610         static int version_printed = 0;
7611         struct net_device *dev = NULL;
7612         struct bnx2 *bp;
7613         int rc;
7614         char str[40];
7615         DECLARE_MAC_BUF(mac);
7616
7617         if (version_printed++ == 0)
7618                 printk(KERN_INFO "%s", version);
7619
7620         /* dev zeroed in init_etherdev */
7621         dev = alloc_etherdev(sizeof(*bp));
7622
7623         if (!dev)
7624                 return -ENOMEM;
7625
7626         rc = bnx2_init_board(pdev, dev);
7627         if (rc < 0) {
7628                 free_netdev(dev);
7629                 return rc;
7630         }
7631
7632         dev->open = bnx2_open;
7633         dev->hard_start_xmit = bnx2_start_xmit;
7634         dev->stop = bnx2_close;
7635         dev->get_stats = bnx2_get_stats;
7636         dev->set_multicast_list = bnx2_set_rx_mode;
7637         dev->do_ioctl = bnx2_ioctl;
7638         dev->set_mac_address = bnx2_change_mac_addr;
7639         dev->change_mtu = bnx2_change_mtu;
7640         dev->tx_timeout = bnx2_tx_timeout;
7641         dev->watchdog_timeo = TX_TIMEOUT;
7642 #ifdef BCM_VLAN
7643         dev->vlan_rx_register = bnx2_vlan_rx_register;
7644 #endif
7645         dev->ethtool_ops = &bnx2_ethtool_ops;
7646
7647         bp = netdev_priv(dev);
7648         bnx2_init_napi(bp);
7649
7650 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7651         dev->poll_controller = poll_bnx2;
7652 #endif
7653
7654         pci_set_drvdata(pdev, dev);
7655
7656         memcpy(dev->dev_addr, bp->mac_addr, 6);
7657         memcpy(dev->perm_addr, bp->mac_addr, 6);
7658         bp->name = board_info[ent->driver_data].name;
7659
7660         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7661         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7662                 dev->features |= NETIF_F_IPV6_CSUM;
7663
7664 #ifdef BCM_VLAN
7665         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7666 #endif
7667         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7668         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7669                 dev->features |= NETIF_F_TSO6;
7670
7671         if ((rc = register_netdev(dev))) {
7672                 dev_err(&pdev->dev, "Cannot register net device\n");
7673                 if (bp->regview)
7674                         iounmap(bp->regview);
7675                 pci_release_regions(pdev);
7676                 pci_disable_device(pdev);
7677                 pci_set_drvdata(pdev, NULL);
7678                 free_netdev(dev);
7679                 return rc;
7680         }
7681
7682         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7683                 "IRQ %d, node addr %s\n",
7684                 dev->name,
7685                 bp->name,
7686                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7687                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7688                 bnx2_bus_string(bp, str),
7689                 dev->base_addr,
7690                 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7691
7692         return 0;
7693 }
7694
7695 static void __devexit
7696 bnx2_remove_one(struct pci_dev *pdev)
7697 {
7698         struct net_device *dev = pci_get_drvdata(pdev);
7699         struct bnx2 *bp = netdev_priv(dev);
7700
7701         flush_scheduled_work();
7702
7703         unregister_netdev(dev);
7704
7705         if (bp->regview)
7706                 iounmap(bp->regview);
7707
7708         free_netdev(dev);
7709         pci_release_regions(pdev);
7710         pci_disable_device(pdev);
7711         pci_set_drvdata(pdev, NULL);
7712 }
7713
7714 static int
7715 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7716 {
7717         struct net_device *dev = pci_get_drvdata(pdev);
7718         struct bnx2 *bp = netdev_priv(dev);
7719         u32 reset_code;
7720
7721         /* PCI register 4 needs to be saved whether netif_running() or not.
7722          * MSI address and data need to be saved if using MSI and
7723          * netif_running().
7724          */
7725         pci_save_state(pdev);
7726         if (!netif_running(dev))
7727                 return 0;
7728
7729         flush_scheduled_work();
7730         bnx2_netif_stop(bp);
7731         netif_device_detach(dev);
7732         del_timer_sync(&bp->timer);
7733         if (bp->flags & BNX2_FLAG_NO_WOL)
7734                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7735         else if (bp->wol)
7736                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7737         else
7738                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7739         bnx2_reset_chip(bp, reset_code);
7740         bnx2_free_skbs(bp);
7741         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7742         return 0;
7743 }
7744
7745 static int
7746 bnx2_resume(struct pci_dev *pdev)
7747 {
7748         struct net_device *dev = pci_get_drvdata(pdev);
7749         struct bnx2 *bp = netdev_priv(dev);
7750
7751         pci_restore_state(pdev);
7752         if (!netif_running(dev))
7753                 return 0;
7754
7755         bnx2_set_power_state(bp, PCI_D0);
7756         netif_device_attach(dev);
7757         bnx2_init_nic(bp, 1);
7758         bnx2_netif_start(bp);
7759         return 0;
7760 }
7761
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It quiesces the interface (if it
 * was running) and asks the PCI error recovery core for a slot reset.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* rtnl_lock serializes against open/close and other net config. */
        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev)) {
                bnx2_netif_stop(bp);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }

        pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
7791
7792 /**
7793  * bnx2_io_slot_reset - called after the pci bus has been reset.
7794  * @pdev: Pointer to PCI device
7795  *
7796  * Restart the card from scratch, as if from a cold-boot.
7797  */
7798 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7799 {
7800         struct net_device *dev = pci_get_drvdata(pdev);
7801         struct bnx2 *bp = netdev_priv(dev);
7802
7803         rtnl_lock();
7804         if (pci_enable_device(pdev)) {
7805                 dev_err(&pdev->dev,
7806                         "Cannot re-enable PCI device after reset.\n");
7807                 rtnl_unlock();
7808                 return PCI_ERS_RESULT_DISCONNECT;
7809         }
7810         pci_set_master(pdev);
7811         pci_restore_state(pdev);
7812
7813         if (netif_running(dev)) {
7814                 bnx2_set_power_state(bp, PCI_D0);
7815                 bnx2_init_nic(bp, 1);
7816         }
7817
7818         rtnl_unlock();
7819         return PCI_ERS_RESULT_RECOVERED;
7820 }
7821
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.  Restarts the interface if it
 * was running, then re-attaches the net device.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	int was_running;

	rtnl_lock();
	was_running = netif_running(dev);
	if (was_running)
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
7841
/* PCI error recovery callbacks (AER): quiesce on error, re-init after a
 * slot reset, and restart traffic on resume. */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
7847
/* PCI driver descriptor tying probe/remove, power management and error
 * recovery entry points to the supported device ID table. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
7857
/* Module load: register the PCI driver; probe runs per matching device. */
static int __init bnx2_init(void)
{
        return pci_register_driver(&bnx2_pci_driver);
}
7862
/* Module unload: unregister the driver, detaching all bound devices. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
7867
/* Module entry/exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7870
7871
7872