bnx2: Allow flexible VLAN tag settings.
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.7"
60 #define DRV_MODULE_RELDATE      "June 17, 2008"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board variants.  Used as the driver_data value in
 * bnx2_pci_tbl[] and as the index into board_info[] below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
} board_t;
92
/* indexed by board_t, above */
/* Marketing name for each board, printed at probe time. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	};
108
/* PCI IDs this driver binds to.  The HP OEM entries (subvendor
 * PCI_VENDOR_ID_HP) must precede the generic PCI_ANY_ID entries for the
 * same device ID so they match first.  The last field is the board_t
 * driver_data.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b: BCM5716 — no PCI_DEVICE_ID_NX2_* constant for it yet */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ 0, }
};
132
/* Known NVRAM device geometries for pre-5709 chips.
 * NOTE(review): the first words of each entry look like strapping /
 * config register values used to identify the part at probe time —
 * confirm against the nvram-init code (not in this chunk).  Do not
 * reorder or edit the magic values.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
221
/* The 5709 family has a single fixed NVRAM geometry, so it bypasses
 * flash_table[] above and uses this descriptor directly.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
230
231 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
232
233 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
234 {
235         u32 diff;
236
237         smp_mb();
238
239         /* The ring uses 256 indices for 255 entries, one of them
240          * needs to be skipped.
241          */
242         diff = txr->tx_prod - txr->tx_cons;
243         if (unlikely(diff >= TX_DESC_CNT)) {
244                 diff &= 0xffff;
245                 if (diff == TX_DESC_CNT)
246                         diff = MAX_TX_DESC_CNT;
247         }
248         return (bp->tx_ring_size - diff);
249 }
250
251 static u32
252 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
253 {
254         u32 val;
255
256         spin_lock_bh(&bp->indirect_lock);
257         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
258         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
259         spin_unlock_bh(&bp->indirect_lock);
260         return val;
261 }
262
263 static void
264 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
265 {
266         spin_lock_bh(&bp->indirect_lock);
267         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
268         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
269         spin_unlock_bh(&bp->indirect_lock);
270 }
271
272 static void
273 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
274 {
275         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
276 }
277
278 static u32
279 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
280 {
281         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
282 }
283
284 static void
285 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
286 {
287         offset += cid_addr;
288         spin_lock_bh(&bp->indirect_lock);
289         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
290                 int i;
291
292                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
293                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
294                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
295                 for (i = 0; i < 5; i++) {
296                         u32 val;
297                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
298                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
299                                 break;
300                         udelay(5);
301                 }
302         } else {
303                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
304                 REG_WR(bp, BNX2_CTX_DATA, val);
305         }
306         spin_unlock_bh(&bp->indirect_lock);
307 }
308
/* Read PHY register @reg through the EMAC MDIO interface.
 *
 * On success stores the 16-bit register value in *val and returns 0;
 * returns -EBUSY with *val zeroed if the transaction does not finish
 * within the ~500us poll window.  If the chip is auto-polling the PHY
 * (BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING), auto-poll is suspended for
 * the duration of the manual access and re-enabled afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Suspend hardware auto-polling so it cannot race with
		 * this manual MDIO transaction. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE); /* flush posted write */

		udelay(40);
	}

	/* Start the read: PHY address, register, READ command, BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Timed out — transaction never completed. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE); /* flush posted write */

		udelay(40);
	}

	return ret;
}
365
/* Write @val to PHY register @reg through the EMAC MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the transaction does not finish
 * within the ~500us poll window.  As with bnx2_read_phy(), hardware
 * auto-polling is suspended around the manual access when enabled.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Suspend hardware auto-polling so it cannot race with
		 * this manual MDIO transaction. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE); /* flush posted write */

		udelay(40);
	}

	/* Start the write: PHY address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE); /* flush posted write */

		udelay(40);
	}

	return ret;
}
414
415 static void
416 bnx2_disable_int(struct bnx2 *bp)
417 {
418         int i;
419         struct bnx2_napi *bnapi;
420
421         for (i = 0; i < bp->irq_nvecs; i++) {
422                 bnapi = &bp->bnx2_napi[i];
423                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
424                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
425         }
426         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
427 }
428
/* Re-enable interrupts on every vector.
 *
 * NOTE(review): each vector is written twice — first with MASK_INT
 * still set plus INDEX_VALID and the last seen status index, then
 * without MASK_INT.  This appears to ack outstanding events before
 * unmasking; confirm against the PCICFG_INT_ACK_CMD description in
 * the chip documentation.  The order of the two writes must not be
 * changed.  The final COAL_NOW kick makes the host coalescing block
 * generate an interrupt if events are already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
449
450 static void
451 bnx2_disable_int_sync(struct bnx2 *bp)
452 {
453         int i;
454
455         atomic_inc(&bp->intr_sem);
456         bnx2_disable_int(bp);
457         for (i = 0; i < bp->irq_nvecs; i++)
458                 synchronize_irq(bp->irq_tbl[i].vector);
459 }
460
461 static void
462 bnx2_napi_disable(struct bnx2 *bp)
463 {
464         int i;
465
466         for (i = 0; i < bp->irq_nvecs; i++)
467                 napi_disable(&bp->bnx2_napi[i].napi);
468 }
469
470 static void
471 bnx2_napi_enable(struct bnx2 *bp)
472 {
473         int i;
474
475         for (i = 0; i < bp->irq_nvecs; i++)
476                 napi_enable(&bp->bnx2_napi[i].napi);
477 }
478
479 static void
480 bnx2_netif_stop(struct bnx2 *bp)
481 {
482         bnx2_disable_int_sync(bp);
483         if (netif_running(bp->dev)) {
484                 bnx2_napi_disable(bp);
485                 netif_tx_disable(bp->dev);
486                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
487         }
488 }
489
490 static void
491 bnx2_netif_start(struct bnx2 *bp)
492 {
493         if (atomic_dec_and_test(&bp->intr_sem)) {
494                 if (netif_running(bp->dev)) {
495                         netif_wake_queue(bp->dev);
496                         bnx2_napi_enable(bp);
497                         bnx2_enable_int(bp);
498                 }
499         }
500 }
501
502 static void
503 bnx2_free_tx_mem(struct bnx2 *bp)
504 {
505         int i;
506
507         for (i = 0; i < bp->num_tx_rings; i++) {
508                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
509                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
510
511                 if (txr->tx_desc_ring) {
512                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
513                                             txr->tx_desc_ring,
514                                             txr->tx_desc_mapping);
515                         txr->tx_desc_ring = NULL;
516                 }
517                 kfree(txr->tx_buf_ring);
518                 txr->tx_buf_ring = NULL;
519         }
520 }
521
522 static void
523 bnx2_free_rx_mem(struct bnx2 *bp)
524 {
525         int i;
526
527         for (i = 0; i < bp->num_rx_rings; i++) {
528                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
530                 int j;
531
532                 for (j = 0; j < bp->rx_max_ring; j++) {
533                         if (rxr->rx_desc_ring[j])
534                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535                                                     rxr->rx_desc_ring[j],
536                                                     rxr->rx_desc_mapping[j]);
537                         rxr->rx_desc_ring[j] = NULL;
538                 }
539                 if (rxr->rx_buf_ring)
540                         vfree(rxr->rx_buf_ring);
541                 rxr->rx_buf_ring = NULL;
542
543                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544                         if (rxr->rx_pg_desc_ring[j])
545                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546                                                     rxr->rx_pg_desc_ring[i],
547                                                     rxr->rx_pg_desc_mapping[i]);
548                         rxr->rx_pg_desc_ring[i] = NULL;
549                 }
550                 if (rxr->rx_pg_ring)
551                         vfree(rxr->rx_pg_ring);
552                 rxr->rx_pg_ring = NULL;
553         }
554 }
555
556 static int
557 bnx2_alloc_tx_mem(struct bnx2 *bp)
558 {
559         int i;
560
561         for (i = 0; i < bp->num_tx_rings; i++) {
562                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
563                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
564
565                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
566                 if (txr->tx_buf_ring == NULL)
567                         return -ENOMEM;
568
569                 txr->tx_desc_ring =
570                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
571                                              &txr->tx_desc_mapping);
572                 if (txr->tx_desc_ring == NULL)
573                         return -ENOMEM;
574         }
575         return 0;
576 }
577
/* Allocate the software shadow rings, DMA descriptor rings and
 * (optionally) page rings for each RX ring.
 *
 * Returns 0 or -ENOMEM.  On failure, allocations made so far are left
 * in place — the caller (bnx2_alloc_mem) frees them via
 * bnx2_free_mem() / bnx2_free_rx_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* Shadow ring is vmalloc'd; it can span many pages
		 * (SW_RXBD_RING_SIZE * rx_max_ring bytes). */
		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* Page ring is only used when jumbo/page-split RX is
		 * configured (rx_pg_ring_size != 0). */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
626
627 static void
628 bnx2_free_mem(struct bnx2 *bp)
629 {
630         int i;
631         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
632
633         bnx2_free_tx_mem(bp);
634         bnx2_free_rx_mem(bp);
635
636         for (i = 0; i < bp->ctx_pages; i++) {
637                 if (bp->ctx_blk[i]) {
638                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
639                                             bp->ctx_blk[i],
640                                             bp->ctx_blk_mapping[i]);
641                         bp->ctx_blk[i] = NULL;
642                 }
643         }
644         if (bnapi->status_blk.msi) {
645                 pci_free_consistent(bp->pdev, bp->status_stats_size,
646                                     bnapi->status_blk.msi,
647                                     bp->status_blk_mapping);
648                 bnapi->status_blk.msi = NULL;
649                 bp->stats_blk = NULL;
650         }
651 }
652
/* Allocate all device DMA memory.
 *
 * Layout: one coherent allocation holds the status block(s) followed
 * by the statistics block; with MSI-X capability, room is reserved for
 * BNX2_MAX_MSIX_HW_VEC per-vector status blocks at
 * BNX2_SBLK_MSIX_ALIGN_SIZE strides.  5709 chips additionally need
 * 8kB of host context memory split into BCM_PAGE_SIZE pages.
 *
 * Returns 0 or -ENOMEM; on any failure everything allocated so far is
 * released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get per-vector MSI-X status blocks
		 * carved out of the same allocation. */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Interrupt number field lives in bits 31:24 of
			 * the INT_ACK_CMD writes. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the (aligned) status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 0x2000 bytes of host context memory. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
729
/* Report the current link state to the bootcode/firmware through the
 * shared-memory BNX2_LINK_STATUS word.  Skipped entirely when the PHY
 * is owned remotely (BNX2_PHY_FLAG_REMOTE_PHY_CAP).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed/duplex into the firmware status word. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice — presumably because the MII
			 * status register latches link-down and the
			 * second read reflects current state (TODO
			 * confirm against IEEE 802.3 clause 22). */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
788
789 static char *
790 bnx2_xceiver_str(struct bnx2 *bp)
791 {
792         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
793                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
794                  "Copper"));
795 }
796
/* Log the current link state to the kernel log, update the netdev
 * carrier state accordingly, and forward the state to the firmware
 * via bnx2_report_fw_link().  The message is built from several
 * consecutive printk()s, so the pieces must stay in this order.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			/* Compose "receive", "transmit" or
			 * "receive & transmit" flow-control text. */
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
833
/* Resolve the effective flow control settings into bp->flow_ctrl
 * (FLOW_CTRL_TX / FLOW_CTRL_RX bits) after a link change.
 *
 * If flow control was not autonegotiated, the user-requested setting
 * is applied directly (full duplex only).  Otherwise the result is
 * derived from the local and link-partner pause advertisements per
 * the IEEE 802.3 pause resolution rules.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Flow control is only negotiated when both speed and flow
	 * control autoneg are enabled; otherwise honor the request. */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is meaningless at half duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * directly in its 1000X status register; no need to compare
	 * advertisement words. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes links use the 1000BASE-X pause bits; translate them to
	 * the standard PAUSE_CAP/PAUSE_ASYM encoding so the resolution
	 * logic below can be shared with copper. */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
909
/* Record speed and duplex for a 5709 SerDes PHY that has link.
 * Reads the autoneg result from the GP_STATUS register block; if
 * speed autoneg is disabled, the requested (forced) settings are
 * used instead.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Temporarily switch the PHY register window to the GP_STATUS
	 * block to read the resolved link parameters, then restore the
	 * default COMBO_IEEEB0 block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* Forced mode: trust the requested settings. */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
948
/* Record speed and duplex for a 5708 SerDes PHY that has link.
 * The 5708S reports the resolved speed and duplex directly in its
 * 1000X status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
977
/* Record speed and duplex for a 5706 SerDes PHY that has link.
 * Line speed is fixed at 1000 Mbps on this PHY; duplex comes from
 * BMCR when forced, or from the intersection of local and partner
 * 1000BASE-X advertisements when autonegotiated.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: the BMCR duplex bit is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Autoneg: duplex is whatever both ends advertised, preferring
	 * full duplex when both modes are common. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1014
/* Determine line speed and duplex for a copper PHY that has link.
 * With autoneg enabled, the highest common denominator of local and
 * link-partner advertisements wins (1000 first, then 100, then 10);
 * otherwise the forced BMCR speed/duplex bits are used.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & bmcr_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T first.  The partner's 1000BASE-T
		 * ability bits in STAT1000 sit two bit positions above
		 * the corresponding local advertisement bits in
		 * CTRL1000, hence the >> 2 before intersecting. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common gigabit mode; fall back to the
			 * 10/100 advertisement registers. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1080
/* Program the L2 context for one rx ring (context id @cid): sets the
 * BD chain type/size and, on 5709, the rx buffer low/high watermarks
 * used for generating pause frames when tx flow control is enabled.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): magic field at bits 8-15; value 0x02 is used
	 * unconditionally here — meaning not visible in this file,
	 * confirm against the chip context documentation. */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Watermarks only matter when we may send pause frames;
		 * otherwise disable the low watermark. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Clamp to sane values for small rings. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Convert buffer counts to the hardware's scaled units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* High watermark field is 4 bits wide. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1116
1117 static void
1118 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1119 {
1120         int i;
1121         u32 cid;
1122
1123         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1124                 if (i == 1)
1125                         cid = RX_RSS_CID;
1126                 bnx2_init_rx_context(bp, cid);
1127         }
1128 }
1129
/* Program the EMAC to match the current link parameters: port mode
 * (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Also re-inits
 * the rx contexts on 5709 since their watermarks depend on the
 * resolved flow control.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff program the EMAC TX lengths
	 * (IPG/slot time); the larger value is needed for 1000 Mbps
	 * half duplex — confirm exact field meaning against EMAC docs. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it
				 * falls through to plain MII. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII with the 25G mode bit set. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx watermarks depend on flow control; reprogram them. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1199
1200 static void
1201 bnx2_enable_bmsr1(struct bnx2 *bp)
1202 {
1203         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1204             (CHIP_NUM(bp) == CHIP_NUM_5709))
1205                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1206                                MII_BNX2_BLK_ADDR_GP_STATUS);
1207 }
1208
1209 static void
1210 bnx2_disable_bmsr1(struct bnx2 *bp)
1211 {
1212         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1213             (CHIP_NUM(bp) == CHIP_NUM_5709))
1214                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1215                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1216 }
1217
/* Enable 2.5G advertisement in the PHY's UP1 register if the PHY is
 * 2.5G-capable.  Returns 1 if 2.5G was already enabled (or the PHY
 * is not capable), 0 if this call had to turn it on — callers use
 * the 0 return to know an autoneg restart is needed.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block; switch
	 * the window around the access and restore it afterwards. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1246
/* Disable 2.5G advertisement in the PHY's UP1 register.  Returns 1
 * if 2.5G was enabled and had to be cleared (callers then restart
 * autoneg), 0 if it was already off or the PHY is not 2.5G-capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block; switch
	 * the window around the access and restore it afterwards. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1272
1273 static void
1274 bnx2_enable_forced_2g5(struct bnx2 *bp)
1275 {
1276         u32 bmcr;
1277
1278         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1279                 return;
1280
1281         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1282                 u32 val;
1283
1284                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1285                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1286                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1287                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1288                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1289                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1290
1291                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1293                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1294
1295         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1296                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1298         }
1299
1300         if (bp->autoneg & AUTONEG_SPEED) {
1301                 bmcr &= ~BMCR_ANENABLE;
1302                 if (bp->req_duplex == DUPLEX_FULL)
1303                         bmcr |= BMCR_FULLDPLX;
1304         }
1305         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1306 }
1307
1308 static void
1309 bnx2_disable_forced_2g5(struct bnx2 *bp)
1310 {
1311         u32 bmcr;
1312
1313         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1314                 return;
1315
1316         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1317                 u32 val;
1318
1319                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1320                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1321                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1322                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1323                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1324
1325                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1327                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1328
1329         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1330                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1332         }
1333
1334         if (bp->autoneg & AUTONEG_SPEED)
1335                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1336         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1337 }
1338
/* Force the 5706 SerDes link down (@start != 0) or release the force
 * (@start == 0) by toggling bits in the expansion SERDES control
 * register accessed through the DSP read/write port.
 * NOTE(review): the 0xff0f mask and 0xc0 bits are undocumented here;
 * confirm their meaning against the 5706S register documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1351
/* Re-evaluate the PHY link state and update driver state, logging and
 * the MAC configuration accordingly.  Skipped entirely in loopback
 * modes (link is assumed up) and when the PHY is managed remotely by
 * the firmware.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the previous state so we only report changes. */
	link_up = bp->link_up;

	/* BMSR latches link-down events; read it twice to get the
	 * current status.  On 5709 SerDes the status lives in another
	 * register block, hence the enable/disable wrappers. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release any previous forced-down state first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* The 5706S BMSR link bit is unreliable; derive link
		 * from the EMAC status plus the AN debug shadow
		 * register (read twice, like BMSR, to clear latches). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper,
		 * then resolve flow control from the autoneg result. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G setting so the
		 * next autoneg can succeed, and leave parallel-detect
		 * mode by re-enabling autoneg. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Always reprogram the MAC to match the (possibly new) state. */
	bnx2_set_mac_link(bp);

	return 0;
}
1435
1436 static int
1437 bnx2_reset_phy(struct bnx2 *bp)
1438 {
1439         int i;
1440         u32 reg;
1441
1442         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1443
1444 #define PHY_RESET_MAX_WAIT 100
1445         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1446                 udelay(10);
1447
1448                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1449                 if (!(reg & BMCR_RESET)) {
1450                         udelay(20);
1451                         break;
1452                 }
1453         }
1454         if (i == PHY_RESET_MAX_WAIT) {
1455                 return -EBUSY;
1456         }
1457         return 0;
1458 }
1459
1460 static u32
1461 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1462 {
1463         u32 adv = 0;
1464
1465         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1466                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1467
1468                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1469                         adv = ADVERTISE_1000XPAUSE;
1470                 }
1471                 else {
1472                         adv = ADVERTISE_PAUSE_CAP;
1473                 }
1474         }
1475         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1476                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1477                         adv = ADVERTISE_1000XPSE_ASYM;
1478                 }
1479                 else {
1480                         adv = ADVERTISE_PAUSE_ASYM;
1481                 }
1482         }
1483         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1484                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1485                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1486                 }
1487                 else {
1488                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1489                 }
1490         }
1491         return adv;
1492 }
1493
1494 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1495
/* Configure link settings on a firmware-managed (remote) PHY by
 * encoding speed/duplex/pause requests into a netlink-style argument
 * word and handing it to the bootcode via the SET_LINK mailbox
 * command.  Called with bp->phy_lock held; the lock is dropped
 * around the firmware handshake.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: pass every advertised speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: pass exactly one speed/duplex setting. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock while waiting for the firmware to process the
	 * command; bnx2_fw_sync() may sleep. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1552
/* Configure a SerDes PHY for the requested link settings, handling
 * both forced-speed and autoneg modes.  Firmware-managed PHYs are
 * delegated to bnx2_setup_remote_phy().  When settings change, the
 * link is deliberately bounced so the partner sees the transition.
 * Called with bp->phy_lock held (dropped briefly around msleep).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Turning 2.5G advertisement on/off requires a link
		 * bounce for the change to take effect. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific 2.5G force mechanism. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is BMCR bit 13
				 * (speed select LSB) — confirm intent. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just refresh MAC settings. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement already correct; refresh MAC settings. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1667
/* Advertisement mask for all fibre speeds this PHY supports; includes
 * 2.5G only when the PHY reports 2.5G capability.  Must be used where
 * a `struct bnx2 *bp` is in scope.
 *
 * The whole expansion is parenthesized: without the outer parens,
 * `ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg` would parse as
 * `cond ? (2500|1000) : (1000 | ADVERTISED_Autoneg)` because `|` binds
 * tighter than `?:`, silently dropping the Autoneg bit on 2.5G PHYs.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))
1672
/* Advertisement mask for all copper speeds (10/100 half+full, 1G full). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 speeds plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1682
1683 static void
1684 bnx2_set_default_remote_link(struct bnx2 *bp)
1685 {
1686         u32 link;
1687
1688         if (bp->phy_port == PORT_TP)
1689                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1690         else
1691                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1692
1693         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1694                 bp->req_line_speed = 0;
1695                 bp->autoneg |= AUTONEG_SPEED;
1696                 bp->advertising = ADVERTISED_Autoneg;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698                         bp->advertising |= ADVERTISED_10baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1700                         bp->advertising |= ADVERTISED_10baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1702                         bp->advertising |= ADVERTISED_100baseT_Half;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1704                         bp->advertising |= ADVERTISED_100baseT_Full;
1705                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706                         bp->advertising |= ADVERTISED_1000baseT_Full;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708                         bp->advertising |= ADVERTISED_2500baseX_Full;
1709         } else {
1710                 bp->autoneg = 0;
1711                 bp->advertising = 0;
1712                 bp->req_duplex = DUPLEX_FULL;
1713                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1714                         bp->req_line_speed = SPEED_10;
1715                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1716                                 bp->req_duplex = DUPLEX_HALF;
1717                 }
1718                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1719                         bp->req_line_speed = SPEED_100;
1720                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1721                                 bp->req_duplex = DUPLEX_HALF;
1722                 }
1723                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1724                         bp->req_line_speed = SPEED_1000;
1725                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1726                         bp->req_line_speed = SPEED_2500;
1727         }
1728 }
1729
1730 static void
1731 bnx2_set_default_link(struct bnx2 *bp)
1732 {
1733         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1734                 bnx2_set_default_remote_link(bp);
1735                 return;
1736         }
1737
1738         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1739         bp->req_line_speed = 0;
1740         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1741                 u32 reg;
1742
1743                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1744
1745                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1746                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1747                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1748                         bp->autoneg = 0;
1749                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1750                         bp->req_duplex = DUPLEX_FULL;
1751                 }
1752         } else
1753                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1754 }
1755
1756 static void
1757 bnx2_send_heart_beat(struct bnx2 *bp)
1758 {
1759         u32 msg;
1760         u32 addr;
1761
1762         spin_lock(&bp->indirect_lock);
1763         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1764         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1765         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1766         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1767         spin_unlock(&bp->indirect_lock);
1768 }
1769
/* Process a link-status event from the firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * line_speed, duplex, flow_ctrl and phy_port, reports a link change if
 * one occurred, and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change detection */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it wants a driver heartbeat. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case deliberately falls through into
		 * the full-duplex case of the same speed to set
		 * line_speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* If either speed or flow control is forced, honor the
		 * requested flow control (full duplex only); otherwise
		 * take the pause bits the firmware negotiated.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Re-derive the default link settings if the media type
		 * (fibre vs. twisted pair) changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1846
1847 static int
1848 bnx2_set_remote_link(struct bnx2 *bp)
1849 {
1850         u32 evt_code;
1851
1852         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1853         switch (evt_code) {
1854                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1855                         bnx2_remote_phy_event(bp);
1856                         break;
1857                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1858                 default:
1859                         bnx2_send_heart_beat(bp);
1860                         break;
1861         }
1862         return 0;
1863 }
1864
/* Set up the copper PHY from bp->autoneg / req_line_speed / req_duplex.
 *
 * Autoneg path: rebuild the MII advertisement registers from
 * bp->advertising plus pause bits, and restart autonegotiation only if
 * the advertisement or the autoneg-enable bit actually changed.
 * Forced path: program BMCR directly, bouncing the link first so the
 * partner notices the change.
 *
 * Drops phy_lock around the 50ms link-down delay, so the caller must
 * hold phy_lock (bottom halves disabled).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits of the current
		 * advertisements for the change comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down events; read twice to get the
		 * current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1961
1962 static int
1963 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1964 {
1965         if (bp->loopback == MAC_LOOPBACK)
1966                 return 0;
1967
1968         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1969                 return (bnx2_setup_serdes_phy(bp, port));
1970         }
1971         else {
1972                 return (bnx2_setup_copper_phy(bp));
1973         }
1974 }
1975
/* One-time init for the 5709 SerDes PHY.
 * The IEEE-standard MII registers sit at offset 0x10 inside the AN MMD
 * on this PHY, so redirect the cached MII register numbers first, then
 * program fibre mode, optional 2.5G advertisement, BAM next-page and
 * CL73 autoneg parameters one register block at a time.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route subsequent accesses to the autoneg MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fibre mode; disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is 2.5G-capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM (Broadcom autoneg) next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2025
/* One-time init for the 5708 SerDes PHY: fibre mode with media
 * auto-detect, PLL early-link detect, optional 2.5G advertisement, a
 * TX amplitude fix for early chip steppings, and an NVRAM-supplied TX
 * control value that is applied on backplane designs only.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may supply a board-specific TX control value. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		/* Apply the override only on backplane (blade) designs. */
		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2083
/* One-time init for the 5706 SerDes PHY.  Adjusts packet-length
 * handling in the PHY (raw registers 0x18/0x1c, vendor-specified
 * values) depending on whether jumbo frames (MTU > 1500) are in use.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length bits again. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2121
/* One-time init for the on-board copper PHY: apply the CRC and
 * early-DAC workarounds where flagged, set packet-length handling for
 * the current MTU, and enable the ethernet@wirespeed feature.  Raw
 * register numbers/values are taken from the vendor sequence.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* CRC workaround: expansion-register write sequence. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8 to disable
		 * the early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits again. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2173
2174
2175 static int
2176 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2177 {
2178         u32 val;
2179         int rc = 0;
2180
2181         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2182         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2183
2184         bp->mii_bmcr = MII_BMCR;
2185         bp->mii_bmsr = MII_BMSR;
2186         bp->mii_bmsr1 = MII_BMSR;
2187         bp->mii_adv = MII_ADVERTISE;
2188         bp->mii_lpa = MII_LPA;
2189
2190         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2191
2192         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2193                 goto setup_phy;
2194
2195         bnx2_read_phy(bp, MII_PHYSID1, &val);
2196         bp->phy_id = val << 16;
2197         bnx2_read_phy(bp, MII_PHYSID2, &val);
2198         bp->phy_id |= val & 0xffff;
2199
2200         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2201                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2202                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2203                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2204                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2205                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2206                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2207         }
2208         else {
2209                 rc = bnx2_init_copper_phy(bp, reset_phy);
2210         }
2211
2212 setup_phy:
2213         if (!rc)
2214                 rc = bnx2_setup_phy(bp, bp->phy_port);
2215
2216         return rc;
2217 }
2218
2219 static int
2220 bnx2_set_mac_loopback(struct bnx2 *bp)
2221 {
2222         u32 mac_mode;
2223
2224         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2225         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2226         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2227         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2228         bp->link_up = 1;
2229         return 0;
2230 }
2231
2232 static int bnx2_test_link(struct bnx2 *);
2233
2234 static int
2235 bnx2_set_phy_loopback(struct bnx2 *bp)
2236 {
2237         u32 mac_mode;
2238         int rc, i;
2239
2240         spin_lock_bh(&bp->phy_lock);
2241         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2242                             BMCR_SPEED1000);
2243         spin_unlock_bh(&bp->phy_lock);
2244         if (rc)
2245                 return rc;
2246
2247         for (i = 0; i < 10; i++) {
2248                 if (bnx2_test_link(bp) == 0)
2249                         break;
2250                 msleep(100);
2251         }
2252
2253         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2254         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2255                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2256                       BNX2_EMAC_MODE_25G_MODE);
2257
2258         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2259         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2260         bp->link_up = 1;
2261         return 0;
2262 }
2263
/* Post @msg_data to the firmware command mailbox, OR-ing in the next
 * driver sequence number, and optionally wait for the firmware ack.
 *
 * @ack: if zero, post the message and return immediately.
 * @silent: if set, suppress the printk on ack timeout.
 *
 * Returns 0 on success or for WAIT0-type messages (which don't require
 * a matching ack — checked before the timeout), -EBUSY if the firmware
 * never acked within FW_ACK_TIME_OUT_MS (a FW_TIMEOUT code is posted
 * back in that case), -EIO if the firmware acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* The firmware echoes our sequence number when done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2309
/* Bring up the 5709 on-chip context memory: start the built-in memory
 * init, then register each pre-allocated host context page in the
 * chip's host page table, polling for each write request to complete.
 * Returns 0 on success, -ENOMEM if a context page was never allocated,
 * -EBUSY if the hardware doesn't respond in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): the meaning of bit 12 isn't defined in this
	 * file; value kept as-is from the original init sequence.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll (up to ~20us) for the hardware memory init to finish. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Publish the page's bus address (low half + valid bit,
		 * then high half), then issue the table write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the page-table write to be accepted. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2357
/* Zero out all 96 connection contexts on 5706/5708-class chips by
 * writing every word of each physical context page through the
 * context-window registers.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 maps part of the context memory to
			 * different physical IDs.  NOTE(review): remap
			 * formula kept as-is from vendor code; the
			 * underlying erratum is not documented here.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2400
2401 static int
2402 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2403 {
2404         u16 *good_mbuf;
2405         u32 good_mbuf_cnt;
2406         u32 val;
2407
2408         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2409         if (good_mbuf == NULL) {
2410                 printk(KERN_ERR PFX "Failed to allocate memory in "
2411                                     "bnx2_alloc_bad_rbuf\n");
2412                 return -ENOMEM;
2413         }
2414
2415         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2416                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2417
2418         good_mbuf_cnt = 0;
2419
2420         /* Allocate a bunch of mbufs and save the good ones in an array. */
2421         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2422         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2423                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2424                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2425
2426                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2427
2428                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2429
2430                 /* The addresses with Bit 9 set are bad memory blocks. */
2431                 if (!(val & (1 << 9))) {
2432                         good_mbuf[good_mbuf_cnt] = (u16) val;
2433                         good_mbuf_cnt++;
2434                 }
2435
2436                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2437         }
2438
2439         /* Free the good ones back to the mbuf pool thus discarding
2440          * all the bad ones. */
2441         while (good_mbuf_cnt) {
2442                 good_mbuf_cnt--;
2443
2444                 val = good_mbuf[good_mbuf_cnt];
2445                 val = (val << 9) | val | 1;
2446
2447                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2448         }
2449         kfree(good_mbuf);
2450         return 0;
2451 }
2452
2453 static void
2454 bnx2_set_mac_addr(struct bnx2 *bp)
2455 {
2456         u32 val;
2457         u8 *mac_addr = bp->dev->dev_addr;
2458
2459         val = (mac_addr[0] << 8) | mac_addr[1];
2460
2461         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2462
2463         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2464                 (mac_addr[4] << 8) | mac_addr[5];
2465
2466         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2467 }
2468
/* Allocate and DMA-map one page for RX page-ring slot @index and fill
 * in the matching rx_bd with its 64-bit bus address.
 * Returns 0 on success, -ENOMEM if the page allocation fails.
 * NOTE(review): the pci_map_page() result is not checked for a mapping
 * error — confirm whether that matters on the supported platforms.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	/* GFP_ATOMIC: may be called where sleeping is not allowed. */
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2488
2489 static void
2490 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2491 {
2492         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2493         struct page *page = rx_pg->page;
2494
2495         if (!page)
2496                 return;
2497
2498         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2499                        PCI_DMA_FROMDEVICE);
2500
2501         __free_page(page);
2502         rx_pg->page = NULL;
2503 }
2504
2505 static inline int
2506 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2507 {
2508         struct sk_buff *skb;
2509         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2510         dma_addr_t mapping;
2511         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2512         unsigned long align;
2513
2514         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2515         if (skb == NULL) {
2516                 return -ENOMEM;
2517         }
2518
2519         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2520                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2521
2522         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2523                 PCI_DMA_FROMDEVICE);
2524
2525         rx_buf->skb = skb;
2526         pci_unmap_addr_set(rx_buf, mapping, mapping);
2527
2528         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2529         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2530
2531         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2532
2533         return 0;
2534 }
2535
2536 static int
2537 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2538 {
2539         struct status_block *sblk = bnapi->status_blk.msi;
2540         u32 new_link_state, old_link_state;
2541         int is_set = 1;
2542
2543         new_link_state = sblk->status_attn_bits & event;
2544         old_link_state = sblk->status_attn_bits_ack & event;
2545         if (new_link_state != old_link_state) {
2546                 if (new_link_state)
2547                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2548                 else
2549                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2550         } else
2551                 is_set = 0;
2552
2553         return is_set;
2554 }
2555
2556 static void
2557 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2558 {
2559         spin_lock(&bp->phy_lock);
2560
2561         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2562                 bnx2_set_link(bp);
2563         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2564                 bnx2_set_remote_link(bp);
2565
2566         spin_unlock(&bp->phy_lock);
2567
2568 }
2569
/* Read the hardware's tx consumer index from the status block.
 * When the raw index lands on the last slot of a ring page (low bits
 * all ones, i.e. equal to MAX_TX_DESC_CNT) it is bumped past it --
 * that slot is never a normal completion (presumably the next-page
 * pointer bd; confirm against the ring setup code).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2582
/* Reclaim completed tx descriptors, processing at most @budget packets.
 *
 * Walks the ring from the driver's consumer index up to the hardware's,
 * unmapping each sent skb's head and fragment pages before freeing it.
 * TSO packets may complete one bd at a time, so a GSO skb is only
 * reclaimed once all of its bds are past the hardware consumer index.
 * Finally updates the ring indices and, after a memory barrier paired
 * with bnx2_start_xmit(), wakes the tx queue if enough descriptors are
 * free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last bd (head bd + one per
			 * fragment). */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				/* The bds wrap past the end-of-page slot
				 * that the consumer index skips over. */
				last_idx++;
			}
			/* Defer until every bd of this packet is done
			 * (signed compare handles index wrap-around). */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap one additional bd per page fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with
	 * bnx2_start_xmit() stopping the queue concurrently. */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2665
/* Recycle @count entries of the rx page ring from the consumer side
 * back to the producer side without allocating fresh pages.
 *
 * If @skb is non-NULL, its last page fragment is first stripped off,
 * re-mapped for the device, and placed back into the consumer slot,
 * and the skb itself is freed -- used to abandon a partially built
 * jumbo skb when a later allocation fails.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = rxr->rx_pg_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			/* Pull the skb's last fragment page back into the
			 * consumer slot and discard the skb. */
			struct page *page;
			struct skb_shared_info *shinfo;

			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page, DMA mapping, and bd address from the
			 * consumer slot to the recycled producer slot. */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2715
/* Recycle an rx buffer whose data was not consumed: hand the skb at
 * ring slot @cons back to producer slot @prod so no fresh allocation
 * is needed.  Only the header area that the CPU may have read (see
 * the matching sync_for_cpu in bnx2_rx_int()) is synced back to the
 * device.  When cons != prod the DMA mapping and bd address move
 * along with the skb.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb is already in place, mapping and bd unchanged. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2745
/* Finish building a received skb that is being handed to the stack.
 *
 * A replacement ring buffer is allocated first; if that fails the old
 * buffer (and, for split frames, the corresponding pages) is recycled
 * and an error returned, dropping the packet.  For non-split frames
 * (@hdr_len == 0) the data is already in the skb head.  For split
 * frames the first @hdr_len bytes are in the head and the remainder
 * is gathered from the page ring into page fragments; the frame
 * length on the ring includes a 4-byte trailer (the CRC -- see the
 * len -= 4 in bnx2_rx_int()) which is trimmed off the last fragment.
 *
 * @ring_idx packs the consumer ring index in the high 16 bits and the
 * producer index in the low 16 bits.  Returns 0 on success, negative
 * errno when the packet had to be dropped.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the pages this split frame used;
			 * raw_len includes the 4-byte trailer. */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain: trim them off
				 * what was already attached and recycle the
				 * untouched pages. */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment carries the 4-byte trailer; drop it. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Abandon the skb; its last fragment goes
				 * back into the ring. */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2838
/* Read the hardware's rx consumer index from the status block,
 * stepping past the last slot of a ring page when the raw index lands
 * there (that slot is never a normal completion -- same convention as
 * bnx2_get_hw_tx_cons()).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2851
2852 static int
2853 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2854 {
2855         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2856         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2857         struct l2_fhdr *rx_hdr;
2858         int rx_pkt = 0, pg_ring_used = 0;
2859
2860         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2861         sw_cons = rxr->rx_cons;
2862         sw_prod = rxr->rx_prod;
2863
2864         /* Memory barrier necessary as speculative reads of the rx
2865          * buffer can be ahead of the index in the status block
2866          */
2867         rmb();
2868         while (sw_cons != hw_cons) {
2869                 unsigned int len, hdr_len;
2870                 u32 status;
2871                 struct sw_bd *rx_buf;
2872                 struct sk_buff *skb;
2873                 dma_addr_t dma_addr;
2874
2875                 sw_ring_cons = RX_RING_IDX(sw_cons);
2876                 sw_ring_prod = RX_RING_IDX(sw_prod);
2877
2878                 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2879                 skb = rx_buf->skb;
2880
2881                 rx_buf->skb = NULL;
2882
2883                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2884
2885                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2886                         BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2887                         PCI_DMA_FROMDEVICE);
2888
2889                 rx_hdr = (struct l2_fhdr *) skb->data;
2890                 len = rx_hdr->l2_fhdr_pkt_len;
2891
2892                 if ((status = rx_hdr->l2_fhdr_status) &
2893                         (L2_FHDR_ERRORS_BAD_CRC |
2894                         L2_FHDR_ERRORS_PHY_DECODE |
2895                         L2_FHDR_ERRORS_ALIGNMENT |
2896                         L2_FHDR_ERRORS_TOO_SHORT |
2897                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2898
2899                         bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2900                                           sw_ring_prod);
2901                         goto next_rx;
2902                 }
2903                 hdr_len = 0;
2904                 if (status & L2_FHDR_STATUS_SPLIT) {
2905                         hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2906                         pg_ring_used = 1;
2907                 } else if (len > bp->rx_jumbo_thresh) {
2908                         hdr_len = bp->rx_jumbo_thresh;
2909                         pg_ring_used = 1;
2910                 }
2911
2912                 len -= 4;
2913
2914                 if (len <= bp->rx_copy_thresh) {
2915                         struct sk_buff *new_skb;
2916
2917                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2918                         if (new_skb == NULL) {
2919                                 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2920                                                   sw_ring_prod);
2921                                 goto next_rx;
2922                         }
2923
2924                         /* aligned copy */
2925                         skb_copy_from_linear_data_offset(skb,
2926                                                          BNX2_RX_OFFSET - 2,
2927                                       new_skb->data, len + 2);
2928                         skb_reserve(new_skb, 2);
2929                         skb_put(new_skb, len);
2930
2931                         bnx2_reuse_rx_skb(bp, rxr, skb,
2932                                 sw_ring_cons, sw_ring_prod);
2933
2934                         skb = new_skb;
2935                 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2936                            dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2937                         goto next_rx;
2938
2939                 skb->protocol = eth_type_trans(skb, bp->dev);
2940
2941                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2942                         (ntohs(skb->protocol) != 0x8100)) {
2943
2944                         dev_kfree_skb(skb);
2945                         goto next_rx;
2946
2947                 }
2948
2949                 skb->ip_summed = CHECKSUM_NONE;
2950                 if (bp->rx_csum &&
2951                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2952                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2953
2954                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2955                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2956                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2957                 }
2958
2959 #ifdef BCM_VLAN
2960                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2961                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2962                                 rx_hdr->l2_fhdr_vlan_tag);
2963                 }
2964                 else
2965 #endif
2966                         netif_receive_skb(skb);
2967
2968                 bp->dev->last_rx = jiffies;
2969                 rx_pkt++;
2970
2971 next_rx:
2972                 sw_cons = NEXT_RX_BD(sw_cons);
2973                 sw_prod = NEXT_RX_BD(sw_prod);
2974
2975                 if ((rx_pkt == budget))
2976                         break;
2977
2978                 /* Refresh hw_cons to see if there is new work */
2979                 if (sw_cons == hw_cons) {
2980                         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2981                         rmb();
2982                 }
2983         }
2984         rxr->rx_cons = sw_cons;
2985         rxr->rx_prod = sw_prod;
2986
2987         if (pg_ring_used)
2988                 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2989
2990         REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2991
2992         REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
2993
2994         mmiowb();
2995
2996         return rx_pkt;
2997
2998 }
2999
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts while NAPI polling runs. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3023
/* One-shot MSI ISR.  Unlike bnx2_msi(), no mask-interrupt register
 * write is issued -- presumably one-shot mode masks the interrupt in
 * hardware until re-armed; confirm against the chip documentation.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3041
/* INTx interrupt handler (also used when the line may be shared).
 * Returns IRQ_NONE when the interrupt was not ours, IRQ_HANDLED
 * otherwise; real work is deferred to NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts while NAPI polling runs. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing only if NAPI was not
	 * already scheduled. */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3081
3082 static inline int
3083 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3084 {
3085         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3086         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3087
3088         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3089             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3090                 return 1;
3091         return 0;
3092 }
3093
3094 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3095                                  STATUS_ATTN_BITS_TIMER_ABORT)
3096
3097 static inline int
3098 bnx2_has_work(struct bnx2_napi *bnapi)
3099 {
3100         struct status_block *sblk = bnapi->status_blk.msi;
3101
3102         if (bnx2_has_fast_work(bnapi))
3103                 return 1;
3104
3105         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3106             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3107                 return 1;
3108
3109         return 0;
3110 }
3111
3112 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3113 {
3114         struct status_block *sblk = bnapi->status_blk.msi;
3115         u32 status_attn_bits = sblk->status_attn_bits;
3116         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3117
3118         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3119             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3120
3121                 bnx2_phy_int(bp, bnapi);
3122
3123                 /* This is needed to take care of transient status
3124                  * during link changes.
3125                  */
3126                 REG_WR(bp, BNX2_HC_COMMAND,
3127                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3128                 REG_RD(bp, BNX2_HC_COMMAND);
3129         }
3130 }
3131
3132 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3133                           int work_done, int budget)
3134 {
3135         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3136         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3137
3138         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3139                 bnx2_tx_int(bp, bnapi, 0);
3140
3141         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3142                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3143
3144         return work_done;
3145 }
3146
/* NAPI poll handler for MSI-X vectors.  Loops doing rx/tx work until
 * either the budget is exhausted (stay scheduled) or no work remains,
 * in which case polling is completed and the vector's interrupt is
 * re-enabled by writing the serviced status index back to the chip.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			netif_rx_complete(bp->dev, napi);
			/* Ack the status index; this also re-enables the
			 * interrupt for this vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3173
/* Main NAPI poll handler (INTx and single-vector MSI).  Services link
 * attention events and rx/tx work until the budget is exhausted or no
 * work remains, then completes polling and re-enables interrupts by
 * acknowledging the last serviced status index.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: write the index twice -- first with the
			 * interrupt still masked, then again to unmask --
			 * presumably a chip workaround to avoid a spurious
			 * interrupt window; confirm against chip errata. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3217
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's rx filtering to match the net_device state:
 * VLAN tag stripping (tags are kept only when no vlan group is
 * registered and ASF is off), promiscuous mode, all-multicast, or a
 * CRC-hashed multicast filter, then (re)enable the rx sort engine.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (top 3 bits) and bit
		 * position (low 5 bits). */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the rx mode register if something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort engine. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3292
3293 static void
3294 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3295         u32 rv2p_proc)
3296 {
3297         int i;
3298         u32 val;
3299
3300         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3301                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3302                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3303                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3304                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3305         }
3306
3307         for (i = 0; i < rv2p_code_len; i += 8) {
3308                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3309                 rv2p_code++;
3310                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3311                 rv2p_code++;
3312
3313                 if (rv2p_proc == RV2P_PROC1) {
3314                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3315                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3316                 }
3317                 else {
3318                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3319                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3320                 }
3321         }
3322
3323         /* Reset the processor, un-stall is done later. */
3324         if (rv2p_proc == RV2P_PROC1) {
3325                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3326         }
3327         else {
3328                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3329         }
3330 }
3331
3332 static int
3333 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3334 {
3335         u32 offset;
3336         u32 val;
3337         int rc;
3338
3339         /* Halt the CPU. */
3340         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3341         val |= cpu_reg->mode_value_halt;
3342         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3343         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3344
3345         /* Load the Text area. */
3346         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3347         if (fw->gz_text) {
3348                 int j;
3349
3350                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3351                                        fw->gz_text_len);
3352                 if (rc < 0)
3353                         return rc;
3354
3355                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3356                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3357                 }
3358         }
3359
3360         /* Load the Data area. */
3361         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3362         if (fw->data) {
3363                 int j;
3364
3365                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3366                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3367                 }
3368         }
3369
3370         /* Load the SBSS area. */
3371         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3372         if (fw->sbss_len) {
3373                 int j;
3374
3375                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3376                         bnx2_reg_wr_ind(bp, offset, 0);
3377                 }
3378         }
3379
3380         /* Load the BSS area. */
3381         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3382         if (fw->bss_len) {
3383                 int j;
3384
3385                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3386                         bnx2_reg_wr_ind(bp, offset, 0);
3387                 }
3388         }
3389
3390         /* Load the Read-Only area. */
3391         offset = cpu_reg->spad_base +
3392                 (fw->rodata_addr - cpu_reg->mips_view_base);
3393         if (fw->rodata) {
3394                 int j;
3395
3396                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3397                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3398                 }
3399         }
3400
3401         /* Clear the pre-fetch instruction. */
3402         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3403         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3404
3405         /* Start the CPU. */
3406         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3407         val &= ~cpu_reg->mode_value_halt;
3408         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3409         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3410
3411         return 0;
3412 }
3413
3414 static int
3415 bnx2_init_cpus(struct bnx2 *bp)
3416 {
3417         struct fw_info *fw;
3418         int rc, rv2p_len;
3419         void *text, *rv2p;
3420
3421         /* Initialize the RV2P processor. */
3422         text = vmalloc(FW_BUF_SIZE);
3423         if (!text)
3424                 return -ENOMEM;
3425         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3426                 rv2p = bnx2_xi_rv2p_proc1;
3427                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3428         } else {
3429                 rv2p = bnx2_rv2p_proc1;
3430                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3431         }
3432         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3433         if (rc < 0)
3434                 goto init_cpu_err;
3435
3436         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3437
3438         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3439                 rv2p = bnx2_xi_rv2p_proc2;
3440                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3441         } else {
3442                 rv2p = bnx2_rv2p_proc2;
3443                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3444         }
3445         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3446         if (rc < 0)
3447                 goto init_cpu_err;
3448
3449         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3450
3451         /* Initialize the RX Processor. */
3452         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3453                 fw = &bnx2_rxp_fw_09;
3454         else
3455                 fw = &bnx2_rxp_fw_06;
3456
3457         fw->text = text;
3458         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3459         if (rc)
3460                 goto init_cpu_err;
3461
3462         /* Initialize the TX Processor. */
3463         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3464                 fw = &bnx2_txp_fw_09;
3465         else
3466                 fw = &bnx2_txp_fw_06;
3467
3468         fw->text = text;
3469         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3470         if (rc)
3471                 goto init_cpu_err;
3472
3473         /* Initialize the TX Patch-up Processor. */
3474         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3475                 fw = &bnx2_tpat_fw_09;
3476         else
3477                 fw = &bnx2_tpat_fw_06;
3478
3479         fw->text = text;
3480         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3481         if (rc)
3482                 goto init_cpu_err;
3483
3484         /* Initialize the Completion Processor. */
3485         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3486                 fw = &bnx2_com_fw_09;
3487         else
3488                 fw = &bnx2_com_fw_06;
3489
3490         fw->text = text;
3491         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3492         if (rc)
3493                 goto init_cpu_err;
3494
3495         /* Initialize the Command Processor. */
3496         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3497                 fw = &bnx2_cp_fw_09;
3498         else
3499                 fw = &bnx2_cp_fw_06;
3500
3501         fw->text = text;
3502         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3503
3504 init_cpu_err:
3505         vfree(text);
3506         return rc;
3507 }
3508
/* Move the device between PCI power states D0 and D3hot.
 *
 * D0:    clear PME status, wait out a possible D3hot exit delay, and
 *        restore normal (non-WOL) EMAC/RPM settings.
 * D3hot: if WOL is enabled, reprogram the PHY/EMAC/RPM so the chip can
 *        receive wake packets, notify the firmware, then write PMCSR.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set state to D0 and write 1 to clear the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear magic/ACPI packet latches and disable magic-packet
		 * detection for normal operation.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * low-power link, saving the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			/* Restore the user's settings for resume time. */
			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: accept broadcast and multicast so wake
			 * packets get through while suspended.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether WOL is armed before suspend. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* PMCSR PowerState field: 3 == D3hot.  5706 A0/A1 parts
		 * are only put in D3hot when WOL is armed — NOTE(review):
		 * presumably a chip erratum; confirm against Broadcom
		 * errata before changing.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3646
3647 static int
3648 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3649 {
3650         u32 val;
3651         int j;
3652
3653         /* Request access to the flash interface. */
3654         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3655         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3656                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3657                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3658                         break;
3659
3660                 udelay(5);
3661         }
3662
3663         if (j >= NVRAM_TIMEOUT_COUNT)
3664                 return -EBUSY;
3665
3666         return 0;
3667 }
3668
3669 static int
3670 bnx2_release_nvram_lock(struct bnx2 *bp)
3671 {
3672         int j;
3673         u32 val;
3674
3675         /* Relinquish nvram interface. */
3676         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3677
3678         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3679                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3680                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3681                         break;
3682
3683                 udelay(5);
3684         }
3685
3686         if (j >= NVRAM_TIMEOUT_COUNT)
3687                 return -EBUSY;
3688
3689         return 0;
3690 }
3691
3692
3693 static int
3694 bnx2_enable_nvram_write(struct bnx2 *bp)
3695 {
3696         u32 val;
3697
3698         val = REG_RD(bp, BNX2_MISC_CFG);
3699         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3700
3701         if (bp->flash_info->flags & BNX2_NV_WREN) {
3702                 int j;
3703
3704                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3705                 REG_WR(bp, BNX2_NVM_COMMAND,
3706                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3707
3708                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3709                         udelay(5);
3710
3711                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3712                         if (val & BNX2_NVM_COMMAND_DONE)
3713                                 break;
3714                 }
3715
3716                 if (j >= NVRAM_TIMEOUT_COUNT)
3717                         return -EBUSY;
3718         }
3719         return 0;
3720 }
3721
3722 static void
3723 bnx2_disable_nvram_write(struct bnx2 *bp)
3724 {
3725         u32 val;
3726
3727         val = REG_RD(bp, BNX2_MISC_CFG);
3728         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3729 }
3730
3731
3732 static void
3733 bnx2_enable_nvram_access(struct bnx2 *bp)
3734 {
3735         u32 val;
3736
3737         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3738         /* Enable both bits, even on read. */
3739         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3740                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3741 }
3742
3743 static void
3744 bnx2_disable_nvram_access(struct bnx2 *bp)
3745 {
3746         u32 val;
3747
3748         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3749         /* Disable both bits, even after read. */
3750         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3751                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3752                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3753 }
3754
3755 static int
3756 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3757 {
3758         u32 cmd;
3759         int j;
3760
3761         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3762                 /* Buffered flash, no erase needed */
3763                 return 0;
3764
3765         /* Build an erase command */
3766         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3767               BNX2_NVM_COMMAND_DOIT;
3768
3769         /* Need to clear DONE bit separately. */
3770         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3771
3772         /* Address of the NVRAM to read from. */
3773         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3774
3775         /* Issue an erase command. */
3776         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3777
3778         /* Wait for completion. */
3779         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3780                 u32 val;
3781
3782                 udelay(5);
3783
3784                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3785                 if (val & BNX2_NVM_COMMAND_DONE)
3786                         break;
3787         }
3788
3789         if (j >= NVRAM_TIMEOUT_COUNT)
3790                 return -EBUSY;
3791
3792         return 0;
3793 }
3794
3795 static int
3796 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3797 {
3798         u32 cmd;
3799         int j;
3800
3801         /* Build the command word. */
3802         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3803
3804         /* Calculate an offset of a buffered flash, not needed for 5709. */
3805         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3806                 offset = ((offset / bp->flash_info->page_size) <<
3807                            bp->flash_info->page_bits) +
3808                           (offset % bp->flash_info->page_size);
3809         }
3810
3811         /* Need to clear DONE bit separately. */
3812         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3813
3814         /* Address of the NVRAM to read from. */
3815         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3816
3817         /* Issue a read command. */
3818         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3819
3820         /* Wait for completion. */
3821         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3822                 u32 val;
3823
3824                 udelay(5);
3825
3826                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3827                 if (val & BNX2_NVM_COMMAND_DONE) {
3828                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3829                         memcpy(ret_val, &v, 4);
3830                         break;
3831                 }
3832         }
3833         if (j >= NVRAM_TIMEOUT_COUNT)
3834                 return -EBUSY;
3835
3836         return 0;
3837 }
3838
3839
3840 static int
3841 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3842 {
3843         u32 cmd;
3844         __be32 val32;
3845         int j;
3846
3847         /* Build the command word. */
3848         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3849
3850         /* Calculate an offset of a buffered flash, not needed for 5709. */
3851         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3852                 offset = ((offset / bp->flash_info->page_size) <<
3853                           bp->flash_info->page_bits) +
3854                          (offset % bp->flash_info->page_size);
3855         }
3856
3857         /* Need to clear DONE bit separately. */
3858         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3859
3860         memcpy(&val32, val, 4);
3861
3862         /* Write the data. */
3863         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3864
3865         /* Address of the NVRAM to write to. */
3866         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3867
3868         /* Issue the write command. */
3869         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3870
3871         /* Wait for completion. */
3872         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3873                 udelay(5);
3874
3875                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3876                         break;
3877         }
3878         if (j >= NVRAM_TIMEOUT_COUNT)
3879                 return -EBUSY;
3880
3881         return 0;
3882 }
3883
/* Identify the attached NVRAM (flash/EEPROM) part from the strapping
 * pins, record it in bp->flash_info, optionally reprogram the flash
 * interface, and determine bp->flash_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or a
 * negative errno from the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 uses a single fixed flash spec; no strap decoding. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against config1, which was written at
			 * reconfigure time, rather than raw strapping.
			 */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* NOTE(review): bit 23 appears to select the backup
		 * strap encoding — confirm against the NetXtreme II
		 * programming reference before relying on this.
		 */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop matched: unsupported flash part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared HW config; fall back to
	 * the table entry's total size if firmware reports none.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3966
3967 static int
3968 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3969                 int buf_size)
3970 {
3971         int rc = 0;
3972         u32 cmd_flags, offset32, len32, extra;
3973
3974         if (buf_size == 0)
3975                 return 0;
3976
3977         /* Request access to the flash interface. */
3978         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3979                 return rc;
3980
3981         /* Enable access to flash interface */
3982         bnx2_enable_nvram_access(bp);
3983
3984         len32 = buf_size;
3985         offset32 = offset;
3986         extra = 0;
3987
3988         cmd_flags = 0;
3989
3990         if (offset32 & 3) {
3991                 u8 buf[4];
3992                 u32 pre_len;
3993
3994                 offset32 &= ~3;
3995                 pre_len = 4 - (offset & 3);
3996
3997                 if (pre_len >= len32) {
3998                         pre_len = len32;
3999                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4000                                     BNX2_NVM_COMMAND_LAST;
4001                 }
4002                 else {
4003                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4004                 }
4005
4006                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4007
4008                 if (rc)
4009                         return rc;
4010
4011                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4012
4013                 offset32 += 4;
4014                 ret_buf += pre_len;
4015                 len32 -= pre_len;
4016         }
4017         if (len32 & 3) {
4018                 extra = 4 - (len32 & 3);
4019                 len32 = (len32 + 4) & ~3;
4020         }
4021
4022         if (len32 == 4) {
4023                 u8 buf[4];
4024
4025                 if (cmd_flags)
4026                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4027                 else
4028                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4029                                     BNX2_NVM_COMMAND_LAST;
4030
4031                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4032
4033                 memcpy(ret_buf, buf, 4 - extra);
4034         }
4035         else if (len32 > 0) {
4036                 u8 buf[4];
4037
4038                 /* Read the first word. */
4039                 if (cmd_flags)
4040                         cmd_flags = 0;
4041                 else
4042                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4043
4044                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4045
4046                 /* Advance to the next dword. */
4047                 offset32 += 4;
4048                 ret_buf += 4;
4049                 len32 -= 4;
4050
4051                 while (len32 > 4 && rc == 0) {
4052                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4053
4054                         /* Advance to the next dword. */
4055                         offset32 += 4;
4056                         ret_buf += 4;
4057                         len32 -= 4;
4058                 }
4059
4060                 if (rc)
4061                         return rc;
4062
4063                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4064                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4065
4066                 memcpy(ret_buf, buf, 4 - extra);
4067         }
4068
4069         /* Disable access to flash interface */
4070         bnx2_disable_nvram_access(bp);
4071
4072         bnx2_release_nvram_lock(bp);
4073
4074         return rc;
4075 }
4076
4077 static int
4078 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4079                 int buf_size)
4080 {
4081         u32 written, offset32, len32;
4082         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4083         int rc = 0;
4084         int align_start, align_end;
4085
4086         buf = data_buf;
4087         offset32 = offset;
4088         len32 = buf_size;
4089         align_start = align_end = 0;
4090
4091         if ((align_start = (offset32 & 3))) {
4092                 offset32 &= ~3;
4093                 len32 += align_start;
4094                 if (len32 < 4)
4095                         len32 = 4;
4096                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4097                         return rc;
4098         }
4099
4100         if (len32 & 3) {
4101                 align_end = 4 - (len32 & 3);
4102                 len32 += align_end;
4103                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4104                         return rc;
4105         }
4106
4107         if (align_start || align_end) {
4108                 align_buf = kmalloc(len32, GFP_KERNEL);
4109                 if (align_buf == NULL)
4110                         return -ENOMEM;
4111                 if (align_start) {
4112                         memcpy(align_buf, start, 4);
4113                 }
4114                 if (align_end) {
4115                         memcpy(align_buf + len32 - 4, end, 4);
4116                 }
4117                 memcpy(align_buf + align_start, data_buf, buf_size);
4118                 buf = align_buf;
4119         }
4120
4121         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4122                 flash_buffer = kmalloc(264, GFP_KERNEL);
4123                 if (flash_buffer == NULL) {
4124                         rc = -ENOMEM;
4125                         goto nvram_write_end;
4126                 }
4127         }
4128
4129         written = 0;
4130         while ((written < len32) && (rc == 0)) {
4131                 u32 page_start, page_end, data_start, data_end;
4132                 u32 addr, cmd_flags;
4133                 int i;
4134
4135                 /* Find the page_start addr */
4136                 page_start = offset32 + written;
4137                 page_start -= (page_start % bp->flash_info->page_size);
4138                 /* Find the page_end addr */
4139                 page_end = page_start + bp->flash_info->page_size;
4140                 /* Find the data_start addr */
4141                 data_start = (written == 0) ? offset32 : page_start;
4142                 /* Find the data_end addr */
4143                 data_end = (page_end > offset32 + len32) ?
4144                         (offset32 + len32) : page_end;
4145
4146                 /* Request access to the flash interface. */
4147                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4148                         goto nvram_write_end;
4149
4150                 /* Enable access to flash interface */
4151                 bnx2_enable_nvram_access(bp);
4152
4153                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4154                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4155                         int j;
4156
4157                         /* Read the whole page into the buffer
4158                          * (non-buffer flash only) */
4159                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4160                                 if (j == (bp->flash_info->page_size - 4)) {
4161                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4162                                 }
4163                                 rc = bnx2_nvram_read_dword(bp,
4164                                         page_start + j,
4165                                         &flash_buffer[j],
4166                                         cmd_flags);
4167
4168                                 if (rc)
4169                                         goto nvram_write_end;
4170
4171                                 cmd_flags = 0;
4172                         }
4173                 }
4174
4175                 /* Enable writes to flash interface (unlock write-protect) */
4176                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4177                         goto nvram_write_end;
4178
4179                 /* Loop to write back the buffer data from page_start to
4180                  * data_start */
4181                 i = 0;
4182                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4183                         /* Erase the page */
4184                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4185                                 goto nvram_write_end;
4186
4187                         /* Re-enable the write again for the actual write */
4188                         bnx2_enable_nvram_write(bp);
4189
4190                         for (addr = page_start; addr < data_start;
4191                                 addr += 4, i += 4) {
4192
4193                                 rc = bnx2_nvram_write_dword(bp, addr,
4194                                         &flash_buffer[i], cmd_flags);
4195
4196                                 if (rc != 0)
4197                                         goto nvram_write_end;
4198
4199                                 cmd_flags = 0;
4200                         }
4201                 }
4202
4203                 /* Loop to write the new data from data_start to data_end */
4204                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4205                         if ((addr == page_end - 4) ||
4206                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4207                                  (addr == data_end - 4))) {
4208
4209                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4210                         }
4211                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4212                                 cmd_flags);
4213
4214                         if (rc != 0)
4215                                 goto nvram_write_end;
4216
4217                         cmd_flags = 0;
4218                         buf += 4;
4219                 }
4220
4221                 /* Loop to write back the buffer data from data_end
4222                  * to page_end */
4223                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4224                         for (addr = data_end; addr < page_end;
4225                                 addr += 4, i += 4) {
4226
4227                                 if (addr == page_end-4) {
4228                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4229                                 }
4230                                 rc = bnx2_nvram_write_dword(bp, addr,
4231                                         &flash_buffer[i], cmd_flags);
4232
4233                                 if (rc != 0)
4234                                         goto nvram_write_end;
4235
4236                                 cmd_flags = 0;
4237                         }
4238                 }
4239
4240                 /* Disable writes to flash interface (lock write-protect) */
4241                 bnx2_disable_nvram_write(bp);
4242
4243                 /* Disable access to flash interface */
4244                 bnx2_disable_nvram_access(bp);
4245                 bnx2_release_nvram_lock(bp);
4246
4247                 /* Increment written */
4248                 written += data_end - data_start;
4249         }
4250
4251 nvram_write_end:
4252         kfree(flash_buffer);
4253         kfree(align_buf);
4254         return rc;
4255 }
4256
4257 static void
4258 bnx2_init_fw_cap(struct bnx2 *bp)
4259 {
4260         u32 val, sig = 0;
4261
4262         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4263         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4264
4265         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4266                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4267
4268         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4269         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4270                 return;
4271
4272         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4273                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4274                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4275         }
4276
4277         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4278             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4279                 u32 link;
4280
4281                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4282
4283                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4284                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4285                         bp->phy_port = PORT_FIBRE;
4286                 else
4287                         bp->phy_port = PORT_TP;
4288
4289                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4290                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4291         }
4292
4293         if (netif_running(bp->dev) && sig)
4294                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4295 }
4296
/* Point the separate PCI GRC windows at the MSI-X vector table and
 * pending-bit array (PBA) locations so MSI-X can be serviced.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4305
/* Reset the chip core after handshaking with the bootcode, then wait
 * for the firmware to finish re-initializing.  5709 resets through
 * BNX2_MISC_COMMAND; older chips through BNX2_PCICFG_MISC_CONFIG.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: software reset via the MISC command register; the
		 * read-back flushes the posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read the firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4411
/* Program the freshly-reset chip: DMA configuration, internal CPUs,
 * MAC address, MTU, host coalescing parameters and status/statistics
 * block addresses, then tell the firmware initialization is done and
 * enable the remaining hardware blocks.  The register write order
 * follows the hardware bring-up sequence and must be preserved.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the internal RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the ethernet backoff timer from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: interrupt values in the high half,
	 * regular values in the low half of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status block configuration for MSI-X vectors > 0. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	/* Enable the remaining blocks; the read-back flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4622
4623 static void
4624 bnx2_clear_ring_states(struct bnx2 *bp)
4625 {
4626         struct bnx2_napi *bnapi;
4627         struct bnx2_tx_ring_info *txr;
4628         struct bnx2_rx_ring_info *rxr;
4629         int i;
4630
4631         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4632                 bnapi = &bp->bnx2_napi[i];
4633                 txr = &bnapi->tx_ring;
4634                 rxr = &bnapi->rx_ring;
4635
4636                 txr->tx_cons = 0;
4637                 txr->hw_tx_cons = 0;
4638                 rxr->rx_prod_bseq = 0;
4639                 rxr->rx_prod = 0;
4640                 rxr->rx_cons = 0;
4641                 rxr->rx_pg_prod = 0;
4642                 rxr->rx_pg_cons = 0;
4643         }
4644 }
4645
4646 static void
4647 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4648 {
4649         u32 val, offset0, offset1, offset2, offset3;
4650         u32 cid_addr = GET_CID_ADDR(cid);
4651
4652         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4653                 offset0 = BNX2_L2CTX_TYPE_XI;
4654                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4655                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4656                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4657         } else {
4658                 offset0 = BNX2_L2CTX_TYPE;
4659                 offset1 = BNX2_L2CTX_CMD_TYPE;
4660                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4661                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4662         }
4663         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4664         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4665
4666         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4667         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4668
4669         val = (u64) txr->tx_desc_mapping >> 32;
4670         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4671
4672         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4673         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4674 }
4675
4676 static void
4677 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4678 {
4679         struct tx_bd *txbd;
4680         u32 cid = TX_CID;
4681         struct bnx2_napi *bnapi;
4682         struct bnx2_tx_ring_info *txr;
4683
4684         bnapi = &bp->bnx2_napi[ring_num];
4685         txr = &bnapi->tx_ring;
4686
4687         if (ring_num == 0)
4688                 cid = TX_CID;
4689         else
4690                 cid = TX_TSS_CID + ring_num - 1;
4691
4692         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4693
4694         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4695
4696         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4697         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4698
4699         txr->tx_prod = 0;
4700         txr->tx_prod_bseq = 0;
4701
4702         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4703         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4704
4705         bnx2_init_tx_context(bp, cid, txr);
4706 }
4707
4708 static void
4709 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4710                      int num_rings)
4711 {
4712         int i;
4713         struct rx_bd *rxbd;
4714
4715         for (i = 0; i < num_rings; i++) {
4716                 int j;
4717
4718                 rxbd = &rx_ring[i][0];
4719                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4720                         rxbd->rx_bd_len = buf_size;
4721                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4722                 }
4723                 if (i == (num_rings - 1))
4724                         j = 0;
4725                 else
4726                         j = i + 1;
4727                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4728                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4729         }
4730 }
4731
/* Program the RX context for ring 'ring_num', then fill the normal RX
 * BD ring with skbs and, when a page ring is configured (jumbo MTU),
 * the page ring with pages, and publish the producer indices.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX CID; RSS rings follow RX_RSS_CID. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Set up the page ring used for jumbo frames. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA address of the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early if allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the RX ring with skbs; stop early if allocation fails. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the producer indices to the hardware mailboxes. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4811
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS scheduling over the extra TX rings. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		/* Build the table 4 byte-sized entries at a time in tbl_32. */
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Each entry maps a hash bucket to one of the non-default
		 * RX rings, round-robin; flush every completed word. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4856
4857 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4858 {
4859         u32 max, num_rings = 1;
4860
4861         while (ring_size > MAX_RX_DESC_CNT) {
4862                 ring_size -= MAX_RX_DESC_CNT;
4863                 num_rings++;
4864         }
4865         /* round to next power of 2 */
4866         max = max_size;
4867         while ((max & num_rings) == 0)
4868                 max >>= 1;
4869
4870         if (num_rings != max)
4871                 max <<= 1;
4872
4873         return max;
4874 }
4875
/* Compute the RX buffer sizes and ring geometry for 'size' descriptors
 * at the current MTU.  When a full frame plus skb overhead would not
 * fit in one page (and the chip supports it), switch to the split
 * model: a small skb buffer plus a separate ring of pages.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Shrink the skb buffer to the copy threshold; the rest of
		 * the frame goes into ring pages. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4914
4915 static void
4916 bnx2_free_tx_skbs(struct bnx2 *bp)
4917 {
4918         int i;
4919
4920         for (i = 0; i < bp->num_tx_rings; i++) {
4921                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4922                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4923                 int j;
4924
4925                 if (txr->tx_buf_ring == NULL)
4926                         continue;
4927
4928                 for (j = 0; j < TX_DESC_CNT; ) {
4929                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4930                         struct sk_buff *skb = tx_buf->skb;
4931                         int k, last;
4932
4933                         if (skb == NULL) {
4934                                 j++;
4935                                 continue;
4936                         }
4937
4938                         pci_unmap_single(bp->pdev,
4939                                          pci_unmap_addr(tx_buf, mapping),
4940                         skb_headlen(skb), PCI_DMA_TODEVICE);
4941
4942                         tx_buf->skb = NULL;
4943
4944                         last = skb_shinfo(skb)->nr_frags;
4945                         for (k = 0; k < last; k++) {
4946                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4947                                 pci_unmap_page(bp->pdev,
4948                                         pci_unmap_addr(tx_buf, mapping),
4949                                         skb_shinfo(skb)->frags[j].size,
4950                                         PCI_DMA_TODEVICE);
4951                         }
4952                         dev_kfree_skb(skb);
4953                         j += k + 1;
4954                 }
4955         }
4956 }
4957
/* Free every rx skb and rx page still posted on the rx rings, unmapping
 * their DMA addresses first.  Called when the rings are torn down on
 * reset or close.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this is `return`, not `continue`, so the
		 * remaining rings are not scanned once an unallocated one
		 * is found.  Presumably rings are allocated in order so
		 * the later ones must be empty too -- confirm against the
		 * allocation path.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		/* NOTE(review): rx_max_ring_idx is computed as the last
		 * valid index (count - 1) above; this `<` bound skips the
		 * final descriptor -- confirm it is never populated.
		 */
		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			/* Undo the streaming DMA mapping made when the
			 * buffer was posted, then release the skb.
			 */
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
4991
/* Release all driver-owned tx and rx buffers; used on reset and close. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4998
4999 static int
5000 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5001 {
5002         int rc;
5003
5004         rc = bnx2_reset_chip(bp, reset_code);
5005         bnx2_free_skbs(bp);
5006         if (rc)
5007                 return rc;
5008
5009         if ((rc = bnx2_init_chip(bp)) != 0)
5010                 return rc;
5011
5012         bnx2_init_all_rings(bp);
5013         return 0;
5014 }
5015
/* Full NIC (re)initialization: reset the chip and rings, then bring up
 * the PHY and link state under phy_lock.  @reset_phy is passed through
 * to bnx2_init_phy() to request a PHY reset as well.  Returns 0 or the
 * error from the chip reset path.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* When the PHY is managed by remote firmware, process its status
	 * now so the link state is current.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5032
/* Ethtool register self-test.  For each register in the table, verify
 * that the writable bits can be cleared and set and that the read-only
 * bits keep their original value; each register is restored afterward.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table columns: register offset, flags (BNX2_FL_NOT_5709 means
	 * skip on 5709 chips), writable-bit mask, read-only-bit mask.
	 * Terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the current value so it can be restored. */
		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must all be cleared by the write of 0. */
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be unaffected by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must all be set by the write of all-ones. */
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5203
5204 static int
5205 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5206 {
5207         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5208                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5209         int i;
5210
5211         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5212                 u32 offset;
5213
5214                 for (offset = 0; offset < size; offset += 4) {
5215
5216                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5217
5218                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5219                                 test_pattern[i]) {
5220                                 return -ENODEV;
5221                         }
5222                 }
5223         }
5224         return 0;
5225 }
5226
5227 static int
5228 bnx2_test_memory(struct bnx2 *bp)
5229 {
5230         int ret = 0;
5231         int i;
5232         static struct mem_entry {
5233                 u32   offset;
5234                 u32   len;
5235         } mem_tbl_5706[] = {
5236                 { 0x60000,  0x4000 },
5237                 { 0xa0000,  0x3000 },
5238                 { 0xe0000,  0x4000 },
5239                 { 0x120000, 0x4000 },
5240                 { 0x1a0000, 0x4000 },
5241                 { 0x160000, 0x4000 },
5242                 { 0xffffffff, 0    },
5243         },
5244         mem_tbl_5709[] = {
5245                 { 0x60000,  0x4000 },
5246                 { 0xa0000,  0x3000 },
5247                 { 0xe0000,  0x4000 },
5248                 { 0x120000, 0x4000 },
5249                 { 0x1a0000, 0x4000 },
5250                 { 0xffffffff, 0    },
5251         };
5252         struct mem_entry *mem_tbl;
5253
5254         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5255                 mem_tbl = mem_tbl_5709;
5256         else
5257                 mem_tbl = mem_tbl_5706;
5258
5259         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5260                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5261                         mem_tbl[i].len)) != 0) {
5262                         return ret;
5263                 }
5264         }
5265
5266         return ret;
5267 }
5268
5269 #define BNX2_MAC_LOOPBACK       0
5270 #define BNX2_PHY_LOOPBACK       1
5271
5272 static int
5273 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5274 {
5275         unsigned int pkt_size, num_pkts, i;
5276         struct sk_buff *skb, *rx_skb;
5277         unsigned char *packet;
5278         u16 rx_start_idx, rx_idx;
5279         dma_addr_t map;
5280         struct tx_bd *txbd;
5281         struct sw_bd *rx_buf;
5282         struct l2_fhdr *rx_hdr;
5283         int ret = -ENODEV;
5284         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5285         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5286         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5287
5288         tx_napi = bnapi;
5289
5290         txr = &tx_napi->tx_ring;
5291         rxr = &bnapi->rx_ring;
5292         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5293                 bp->loopback = MAC_LOOPBACK;
5294                 bnx2_set_mac_loopback(bp);
5295         }
5296         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5297                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5298                         return 0;
5299
5300                 bp->loopback = PHY_LOOPBACK;
5301                 bnx2_set_phy_loopback(bp);
5302         }
5303         else
5304                 return -EINVAL;
5305
5306         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5307         skb = netdev_alloc_skb(bp->dev, pkt_size);
5308         if (!skb)
5309                 return -ENOMEM;
5310         packet = skb_put(skb, pkt_size);
5311         memcpy(packet, bp->dev->dev_addr, 6);
5312         memset(packet + 6, 0x0, 8);
5313         for (i = 14; i < pkt_size; i++)
5314                 packet[i] = (unsigned char) (i & 0xff);
5315
5316         map = pci_map_single(bp->pdev, skb->data, pkt_size,
5317                 PCI_DMA_TODEVICE);
5318
5319         REG_WR(bp, BNX2_HC_COMMAND,
5320                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5321
5322         REG_RD(bp, BNX2_HC_COMMAND);
5323
5324         udelay(5);
5325         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5326
5327         num_pkts = 0;
5328
5329         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5330
5331         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5332         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5333         txbd->tx_bd_mss_nbytes = pkt_size;
5334         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5335
5336         num_pkts++;
5337         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5338         txr->tx_prod_bseq += pkt_size;
5339
5340         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5341         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5342
5343         udelay(100);
5344
5345         REG_WR(bp, BNX2_HC_COMMAND,
5346                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5347
5348         REG_RD(bp, BNX2_HC_COMMAND);
5349
5350         udelay(5);
5351
5352         pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5353         dev_kfree_skb(skb);
5354
5355         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5356                 goto loopback_test_done;
5357
5358         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5359         if (rx_idx != rx_start_idx + num_pkts) {
5360                 goto loopback_test_done;
5361         }
5362
5363         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5364         rx_skb = rx_buf->skb;
5365
5366         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5367         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5368
5369         pci_dma_sync_single_for_cpu(bp->pdev,
5370                 pci_unmap_addr(rx_buf, mapping),
5371                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5372
5373         if (rx_hdr->l2_fhdr_status &
5374                 (L2_FHDR_ERRORS_BAD_CRC |
5375                 L2_FHDR_ERRORS_PHY_DECODE |
5376                 L2_FHDR_ERRORS_ALIGNMENT |
5377                 L2_FHDR_ERRORS_TOO_SHORT |
5378                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5379
5380                 goto loopback_test_done;
5381         }
5382
5383         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5384                 goto loopback_test_done;
5385         }
5386
5387         for (i = 14; i < pkt_size; i++) {
5388                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5389                         goto loopback_test_done;
5390                 }
5391         }
5392
5393         ret = 0;
5394
5395 loopback_test_done:
5396         bp->loopback = 0;
5397         return ret;
5398 }
5399
5400 #define BNX2_MAC_LOOPBACK_FAILED        1
5401 #define BNX2_PHY_LOOPBACK_FAILED        2
5402 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5403                                          BNX2_PHY_LOOPBACK_FAILED)
5404
5405 static int
5406 bnx2_test_loopback(struct bnx2 *bp)
5407 {
5408         int rc = 0;
5409
5410         if (!netif_running(bp->dev))
5411                 return BNX2_LOOPBACK_FAILED;
5412
5413         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5414         spin_lock_bh(&bp->phy_lock);
5415         bnx2_init_phy(bp, 1);
5416         spin_unlock_bh(&bp->phy_lock);
5417         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5418                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5419         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5420                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5421         return rc;
5422 }
5423
5424 #define NVRAM_SIZE 0x200
5425 #define CRC32_RESIDUAL 0xdebb20e3
5426
5427 static int
5428 bnx2_test_nvram(struct bnx2 *bp)
5429 {
5430         __be32 buf[NVRAM_SIZE / 4];
5431         u8 *data = (u8 *) buf;
5432         int rc = 0;
5433         u32 magic, csum;
5434
5435         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5436                 goto test_nvram_done;
5437
5438         magic = be32_to_cpu(buf[0]);
5439         if (magic != 0x669955aa) {
5440                 rc = -ENODEV;
5441                 goto test_nvram_done;
5442         }
5443
5444         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5445                 goto test_nvram_done;
5446
5447         csum = ether_crc_le(0x100, data);
5448         if (csum != CRC32_RESIDUAL) {
5449                 rc = -ENODEV;
5450                 goto test_nvram_done;
5451         }
5452
5453         csum = ether_crc_le(0x100, data + 0x100);
5454         if (csum != CRC32_RESIDUAL) {
5455                 rc = -ENODEV;
5456         }
5457
5458 test_nvram_done:
5459         return rc;
5460 }
5461
5462 static int
5463 bnx2_test_link(struct bnx2 *bp)
5464 {
5465         u32 bmsr;
5466
5467         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5468                 if (bp->link_up)
5469                         return 0;
5470                 return -ENODEV;
5471         }
5472         spin_lock_bh(&bp->phy_lock);
5473         bnx2_enable_bmsr1(bp);
5474         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5475         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5476         bnx2_disable_bmsr1(bp);
5477         spin_unlock_bh(&bp->phy_lock);
5478
5479         if (bmsr & BMSR_LSTATUS) {
5480                 return 0;
5481         }
5482         return -ENODEV;
5483 }
5484
5485 static int
5486 bnx2_test_intr(struct bnx2 *bp)
5487 {
5488         int i;
5489         u16 status_idx;
5490
5491         if (!netif_running(bp->dev))
5492                 return -ENODEV;
5493
5494         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5495
5496         /* This register is not touched during run-time. */
5497         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5498         REG_RD(bp, BNX2_HC_COMMAND);
5499
5500         for (i = 0; i < 10; i++) {
5501                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5502                         status_idx) {
5503
5504                         break;
5505                 }
5506
5507                 msleep_interruptible(10);
5508         }
5509         if (i < 10)
5510                 return 0;
5511
5512         return -ENODEV;
5513 }
5514
/* Determining link for parallel detection.
 *
 * Checks signal detect, the autoneg debug sync/RUDI status, and the
 * expansion register to decide whether a non-autonegotiating link
 * partner is present.  Returns 1 when a parallel-detect link can be
 * assumed, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the shadow register, then read it back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	/* Read twice -- NOTE(review): presumably the first read returns
	 * latched state and the second the current state; confirm with
	 * the PHY documentation before changing.
	 */
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern via the DSP access port. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5546
/* Periodic maintenance of the 5706 SerDes link, run from bnx2_timer()
 * under phy_lock.  Forces the link speed via parallel detection when
 * autoneg does not complete, reverts to autoneg when the partner starts
 * negotiating, and forces the link down when rx sync is lost.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still waiting out a previously started autoneg cycle. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* No link with autoneg enabled: if the partner
			 * looks like a non-autoneg device, force 1G full
			 * duplex (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect.  NOTE(review): bit 0x20
		 * of shadow register 0x15 (selected via 0x17) presumably
		 * means the partner has started autonegotiating -- confirm
		 * against the PHY data sheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner negotiates: go back to autoneg. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* Double read of the AN debug shadow register, as in
		 * bnx2_5706_serdes_has_link().
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Rx sync lost while the link is reported up:
			 * force the link down once, then let
			 * bnx2_set_link() re-evaluate on later ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5608
5609 static void
5610 bnx2_5708_serdes_timer(struct bnx2 *bp)
5611 {
5612         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5613                 return;
5614
5615         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5616                 bp->serdes_an_pending = 0;
5617                 return;
5618         }
5619
5620         spin_lock(&bp->phy_lock);
5621         if (bp->serdes_an_pending)
5622                 bp->serdes_an_pending--;
5623         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5624                 u32 bmcr;
5625
5626                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5627                 if (bmcr & BMCR_ANENABLE) {
5628                         bnx2_enable_forced_2g5(bp);
5629                         bp->current_interval = SERDES_FORCED_TIMEOUT;
5630                 } else {
5631                         bnx2_disable_forced_2g5(bp);
5632                         bp->serdes_an_pending = 2;
5633                         bp->current_interval = bp->timer_interval;
5634                 }
5635
5636         } else
5637                 bp->current_interval = bp->timer_interval;
5638
5639         spin_unlock(&bp->phy_lock);
5640 }
5641
/* Driver heartbeat timer, re-armed every bp->current_interval jiffies.
 * Sends the firmware heartbeat, refreshes the firmware rx-drop counter,
 * applies a 5708 statistics workaround, and drives the SerDes link
 * state machines.  Stops re-arming once the device is down.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* While intr_sem is raised (e.g. by bnx2_reset_task), skip the
	 * periodic work and just re-arm.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5673
5674 static int
5675 bnx2_request_irq(struct bnx2 *bp)
5676 {
5677         unsigned long flags;
5678         struct bnx2_irq *irq;
5679         int rc = 0, i;
5680
5681         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5682                 flags = 0;
5683         else
5684                 flags = IRQF_SHARED;
5685
5686         for (i = 0; i < bp->irq_nvecs; i++) {
5687                 irq = &bp->irq_tbl[i];
5688                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5689                                  &bp->bnx2_napi[i]);
5690                 if (rc)
5691                         break;
5692                 irq->requested = 1;
5693         }
5694         return rc;
5695 }
5696
5697 static void
5698 bnx2_free_irq(struct bnx2 *bp)
5699 {
5700         struct bnx2_irq *irq;
5701         int i;
5702
5703         for (i = 0; i < bp->irq_nvecs; i++) {
5704                 irq = &bp->irq_tbl[i];
5705                 if (irq->requested)
5706                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5707                 irq->requested = 0;
5708         }
5709         if (bp->flags & BNX2_FLAG_USING_MSI)
5710                 pci_disable_msi(bp->pdev);
5711         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5712                 pci_disable_msix(bp->pdev);
5713
5714         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5715 }
5716
5717 static void
5718 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5719 {
5720         int i, rc;
5721         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5722
5723         bnx2_setup_msix_tbl(bp);
5724         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5725         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5726         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5727
5728         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5729                 msix_ent[i].entry = i;
5730                 msix_ent[i].vector = 0;
5731
5732                 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5733                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5734         }
5735
5736         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5737         if (rc != 0)
5738                 return;
5739
5740         bp->irq_nvecs = msix_vecs;
5741         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5742         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5743                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5744 }
5745
5746 static void
5747 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5748 {
5749         int cpus = num_online_cpus();
5750         int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5751
5752         bp->irq_tbl[0].handler = bnx2_interrupt;
5753         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5754         bp->irq_nvecs = 1;
5755         bp->irq_tbl[0].vector = bp->pdev->irq;
5756
5757         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5758                 bnx2_enable_msix(bp, msix_vecs);
5759
5760         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5761             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5762                 if (pci_enable_msi(bp->pdev) == 0) {
5763                         bp->flags |= BNX2_FLAG_USING_MSI;
5764                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5765                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5766                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5767                         } else
5768                                 bp->irq_tbl[0].handler = bnx2_msi;
5769
5770                         bp->irq_tbl[0].vector = bp->pdev->irq;
5771                 }
5772         }
5773         bp->num_tx_rings = 1;
5774         bp->num_rx_rings = bp->irq_nvecs;
5775 }
5776
/* Called with rtnl_lock.
 *
 * Bring the interface up: power the device, pick the interrupt mode,
 * allocate rings, request IRQs, initialize the NIC, and start the timer
 * and tx queue.  If MSI was selected, an interrupt self-test is run and
 * the driver falls back to INTx on failure.  On any error, everything
 * acquired so far is released.  Returns 0 or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Choose MSI-X/MSI/INTx first; ring counts depend on the mode. */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-run setup with MSI forced off, then bring
			 * the NIC back up without resetting the PHY.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;

open_err:
	/* Unwind everything acquired above; each helper is safe to call
	 * even for steps that were never reached.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
5853
5854 static void
5855 bnx2_reset_task(struct work_struct *work)
5856 {
5857         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5858
5859         if (!netif_running(bp->dev))
5860                 return;
5861
5862         bnx2_netif_stop(bp);
5863
5864         bnx2_init_nic(bp, 1);
5865
5866         atomic_set(&bp->intr_sem, 1);
5867         bnx2_netif_start(bp);
5868 }
5869
5870 static void
5871 bnx2_tx_timeout(struct net_device *dev)
5872 {
5873         struct bnx2 *bp = netdev_priv(dev);
5874
5875         /* This allows the netif to be shutdown gracefully before resetting */
5876         schedule_work(&bp->reset_task);
5877 }
5878
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while the VLAN group pointer and the RX
	 * filtering configuration are changed.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	/* Tell the firmware that the VLAN configuration changed
	 * (flag presumably set only when the firmware supports
	 * keeping VLAN tags — confirm against bnx2.h).
	 */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
5896
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

	/* The queue should have been stopped before the ring got this
	 * full; getting here indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Insert the VLAN tag via the BD only when a VLAN group is
	 * registered and the skb carries a tag.
	 */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Encode the extra IPv6 TCP header offset (in
			 * 8-byte units) into the BD flag/mss bits.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 LSO rewrites the headers below, so a
			 * cloned header area must be made private first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime the IP total length and the TCP
			 * pseudo-header checksum for per-segment
			 * completion by the hardware.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Header length (IP options + TCP options) in
			 * 32-bit words goes into bits 8+.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	/* First BD carries the linear part of the skb. */
	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Kick the chip: publish the new producer index and byte count. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when it cannot hold another worst-case skb;
	 * re-wake immediately if the consumer caught up in the meantime.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
6038
6039 /* Called with rtnl_lock */
6040 static int
6041 bnx2_close(struct net_device *dev)
6042 {
6043         struct bnx2 *bp = netdev_priv(dev);
6044         u32 reset_code;
6045
6046         cancel_work_sync(&bp->reset_task);
6047
6048         bnx2_disable_int_sync(bp);
6049         bnx2_napi_disable(bp);
6050         del_timer_sync(&bp->timer);
6051         if (bp->flags & BNX2_FLAG_NO_WOL)
6052                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6053         else if (bp->wol)
6054                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6055         else
6056                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6057         bnx2_reset_chip(bp, reset_code);
6058         bnx2_free_irq(bp);
6059         bnx2_free_skbs(bp);
6060         bnx2_free_mem(bp);
6061         bp->link_up = 0;
6062         netif_carrier_off(bp->dev);
6063         bnx2_set_power_state(bp, PCI_D3hot);
6064         return 0;
6065 }
6066
/* Combine the _hi/_lo halves of a 64-bit hardware counter.  The whole
 * expansion is parenthesized so the macro is safe inside any larger
 * expression (the previous form expanded to an unparenthesized sum).
 * Only meaningful when unsigned long is 64 bits wide.
 */
#define GET_NET_STATS64(ctr)					\
	((((unsigned long) (ctr##_hi)) << 32) +			\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits of the counter are reported. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6079
/* netdev get_stats hook: translate the chip's DMA'd statistics block
 * into struct net_device_stats.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Before the stats block exists, return the last (possibly
	 * zeroed) snapshot rather than dereferencing NULL.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* 64-bit hardware counters; on 32-bit hosts GET_NET_STATS
	 * reports only the low 32 bits (see macro above).
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): 5706 and 5708 A0 presumably report unreliable
	 * carrier-sense counts, hence the forced zero — confirm.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Missed = MAC buffer discards plus frames dropped by firmware. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6155
6156 /* All ethtool functions called with rtnl_lock */
6157
6158 static int
6159 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6160 {
6161         struct bnx2 *bp = netdev_priv(dev);
6162         int support_serdes = 0, support_copper = 0;
6163
6164         cmd->supported = SUPPORTED_Autoneg;
6165         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6166                 support_serdes = 1;
6167                 support_copper = 1;
6168         } else if (bp->phy_port == PORT_FIBRE)
6169                 support_serdes = 1;
6170         else
6171                 support_copper = 1;
6172
6173         if (support_serdes) {
6174                 cmd->supported |= SUPPORTED_1000baseT_Full |
6175                         SUPPORTED_FIBRE;
6176                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6177                         cmd->supported |= SUPPORTED_2500baseX_Full;
6178
6179         }
6180         if (support_copper) {
6181                 cmd->supported |= SUPPORTED_10baseT_Half |
6182                         SUPPORTED_10baseT_Full |
6183                         SUPPORTED_100baseT_Half |
6184                         SUPPORTED_100baseT_Full |
6185                         SUPPORTED_1000baseT_Full |
6186                         SUPPORTED_TP;
6187
6188         }
6189
6190         spin_lock_bh(&bp->phy_lock);
6191         cmd->port = bp->phy_port;
6192         cmd->advertising = bp->advertising;
6193
6194         if (bp->autoneg & AUTONEG_SPEED) {
6195                 cmd->autoneg = AUTONEG_ENABLE;
6196         }
6197         else {
6198                 cmd->autoneg = AUTONEG_DISABLE;
6199         }
6200
6201         if (netif_carrier_ok(dev)) {
6202                 cmd->speed = bp->line_speed;
6203                 cmd->duplex = bp->duplex;
6204         }
6205         else {
6206                 cmd->speed = -1;
6207                 cmd->duplex = -1;
6208         }
6209         spin_unlock_bh(&bp->phy_lock);
6210
6211         cmd->transceiver = XCVR_INTERNAL;
6212         cmd->phy_address = bp->phy_addr;
6213
6214         return 0;
6215 }
6216
/* ethtool set_settings: validate and apply autoneg/speed/duplex/port.
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching to the other port requires remote-PHY capable fw. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable PHY and the fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the
			 * selected port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Commit the validated settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6311
6312 static void
6313 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6314 {
6315         struct bnx2 *bp = netdev_priv(dev);
6316
6317         strcpy(info->driver, DRV_MODULE_NAME);
6318         strcpy(info->version, DRV_MODULE_VERSION);
6319         strcpy(info->bus_info, pci_name(bp->pdev));
6320         strcpy(info->fw_version, bp->fw_version);
6321 }
6322
/* Size of the register dump produced by bnx2_get_regs() (ethtool -d). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: tell the core how big a dump buffer to pass. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6330
6331 static void
6332 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6333 {
6334         u32 *p = _p, i, offset;
6335         u8 *orig_p = _p;
6336         struct bnx2 *bp = netdev_priv(dev);
6337         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6338                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6339                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6340                                  0x1040, 0x1048, 0x1080, 0x10a4,
6341                                  0x1400, 0x1490, 0x1498, 0x14f0,
6342                                  0x1500, 0x155c, 0x1580, 0x15dc,
6343                                  0x1600, 0x1658, 0x1680, 0x16d8,
6344                                  0x1800, 0x1820, 0x1840, 0x1854,
6345                                  0x1880, 0x1894, 0x1900, 0x1984,
6346                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6347                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6348                                  0x2000, 0x2030, 0x23c0, 0x2400,
6349                                  0x2800, 0x2820, 0x2830, 0x2850,
6350                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6351                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6352                                  0x4080, 0x4090, 0x43c0, 0x4458,
6353                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6354                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6355                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6356                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6357                                  0x6800, 0x6848, 0x684c, 0x6860,
6358                                  0x6888, 0x6910, 0x8000 };
6359
6360         regs->version = 0;
6361
6362         memset(p, 0, BNX2_REGDUMP_LEN);
6363
6364         if (!netif_running(bp->dev))
6365                 return;
6366
6367         i = 0;
6368         offset = reg_boundaries[0];
6369         p += offset;
6370         while (offset < BNX2_REGDUMP_LEN) {
6371                 *p++ = REG_RD(bp, offset);
6372                 offset += 4;
6373                 if (offset == reg_boundaries[i + 1]) {
6374                         offset = reg_boundaries[i + 2];
6375                         p = (u32 *) (orig_p + offset);
6376                         i += 2;
6377                 }
6378         }
6379 }
6380
6381 static void
6382 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6383 {
6384         struct bnx2 *bp = netdev_priv(dev);
6385
6386         if (bp->flags & BNX2_FLAG_NO_WOL) {
6387                 wol->supported = 0;
6388                 wol->wolopts = 0;
6389         }
6390         else {
6391                 wol->supported = WAKE_MAGIC;
6392                 if (bp->wol)
6393                         wol->wolopts = WAKE_MAGIC;
6394                 else
6395                         wol->wolopts = 0;
6396         }
6397         memset(&wol->sopass, 0, sizeof(wol->sopass));
6398 }
6399
6400 static int
6401 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6402 {
6403         struct bnx2 *bp = netdev_priv(dev);
6404
6405         if (wol->wolopts & ~WAKE_MAGIC)
6406                 return -EINVAL;
6407
6408         if (wol->wolopts & WAKE_MAGIC) {
6409                 if (bp->flags & BNX2_FLAG_NO_WOL)
6410                         return -EINVAL;
6411
6412                 bp->wol = 1;
6413         }
6414         else {
6415                 bp->wol = 0;
6416         }
6417         return 0;
6418 }
6419
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EINVAL if autoneg is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With remote-PHY firmware, renegotiation is delegated to it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; PHY state may be
		 * touched by others in the meantime.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6462
6463 static int
6464 bnx2_get_eeprom_len(struct net_device *dev)
6465 {
6466         struct bnx2 *bp = netdev_priv(dev);
6467
6468         if (bp->flash_info == NULL)
6469                 return 0;
6470
6471         return (int) bp->flash_size;
6472 }
6473
6474 static int
6475 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6476                 u8 *eebuf)
6477 {
6478         struct bnx2 *bp = netdev_priv(dev);
6479         int rc;
6480
6481         /* parameters already validated in ethtool_get_eeprom */
6482
6483         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6484
6485         return rc;
6486 }
6487
6488 static int
6489 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6490                 u8 *eebuf)
6491 {
6492         struct bnx2 *bp = netdev_priv(dev);
6493         int rc;
6494
6495         /* parameters already validated in ethtool_set_eeprom */
6496
6497         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6498
6499         return rc;
6500 }
6501
6502 static int
6503 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6504 {
6505         struct bnx2 *bp = netdev_priv(dev);
6506
6507         memset(coal, 0, sizeof(struct ethtool_coalesce));
6508
6509         coal->rx_coalesce_usecs = bp->rx_ticks;
6510         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6511         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6512         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6513
6514         coal->tx_coalesce_usecs = bp->tx_ticks;
6515         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6516         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6517         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6518
6519         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6520
6521         return 0;
6522 }
6523
/* ethtool set_coalesce: apply new interrupt coalescing parameters,
 * clamping each to the hardware limits (tick values to 10 bits, frame
 * counts to 8 bits), then reinitialize the NIC if it is running.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* NOTE(review): 5708 apparently supports only "off" or one
	 * update per second for the stats block — confirm.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* A restart is required for the new values to take effect. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6572
6573 static void
6574 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6575 {
6576         struct bnx2 *bp = netdev_priv(dev);
6577
6578         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6579         ering->rx_mini_max_pending = 0;
6580         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6581
6582         ering->rx_pending = bp->rx_ring_size;
6583         ering->rx_mini_pending = 0;
6584         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6585
6586         ering->tx_max_pending = MAX_TX_DESC_CNT;
6587         ering->tx_pending = bp->tx_ring_size;
6588 }
6589
/* Resize the RX/TX rings.  If the interface is up, the chip is reset
 * and all buffers are freed first, then everything is rebuilt with the
 * new sizes.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		/* NOTE(review): if this allocation fails the device is
		 * left stopped with no rings allocated — confirm the
		 * caller/user recovers (e.g. by closing the device).
		 */
		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6614
6615 static int
6616 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6617 {
6618         struct bnx2 *bp = netdev_priv(dev);
6619         int rc;
6620
6621         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6622                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6623                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6624
6625                 return -EINVAL;
6626         }
6627         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6628         return rc;
6629 }
6630
6631 static void
6632 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6633 {
6634         struct bnx2 *bp = netdev_priv(dev);
6635
6636         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6637         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6638         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6639 }
6640
6641 static int
6642 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6643 {
6644         struct bnx2 *bp = netdev_priv(dev);
6645
6646         bp->req_flow_ctrl = 0;
6647         if (epause->rx_pause)
6648                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6649         if (epause->tx_pause)
6650                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6651
6652         if (epause->autoneg) {
6653                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6654         }
6655         else {
6656                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6657         }
6658
6659         spin_lock_bh(&bp->phy_lock);
6660
6661         bnx2_setup_phy(bp, bp->phy_port);
6662
6663         spin_unlock_bh(&bp->phy_lock);
6664
6665         return 0;
6666 }
6667
6668 static u32
6669 bnx2_get_rx_csum(struct net_device *dev)
6670 {
6671         struct bnx2 *bp = netdev_priv(dev);
6672
6673         return bp->rx_csum;
6674 }
6675
6676 static int
6677 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6678 {
6679         struct bnx2 *bp = netdev_priv(dev);
6680
6681         bp->rx_csum = data;
6682         return 0;
6683 }
6684
6685 static int
6686 bnx2_set_tso(struct net_device *dev, u32 data)
6687 {
6688         struct bnx2 *bp = netdev_priv(dev);
6689
6690         if (data) {
6691                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6692                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6693                         dev->features |= NETIF_F_TSO6;
6694         } else
6695                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6696                                    NETIF_F_TSO_ECN);
6697         return 0;
6698 }
6699
#define BNX2_NUM_STATS 46

/* ethtool statistics names, returned by bnx2_get_strings() for
 * ETH_SS_STATS.  The order must match bnx2_stats_offset_arr[] and the
 * per-chip counter-width arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6752
/* Offset of a statistics-block field, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-counter word offsets into the hardware statistics block, in the
 * same order as bnx2_stats_str_arr[].  64-bit counters point at their
 * _hi (most significant) word; the low word follows immediately.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6803
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter widths for 5706 A0-A2 / 5708 A0 (see bnx2_get_ethtool_stats):
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skipped (reported as 0).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6814
/* Counter widths for all other chips: 8 = 64-bit, 4 = 32-bit,
 * 0 = skipped.  Same ordering as bnx2_stats_str_arr[].
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6822
#define BNX2_NUM_TESTS 6

/* Self-test names for ETH_SS_TEST; indices match the buf[] entries set
 * by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6835
6836 static int
6837 bnx2_get_sset_count(struct net_device *dev, int sset)
6838 {
6839         switch (sset) {
6840         case ETH_SS_TEST:
6841                 return BNX2_NUM_TESTS;
6842         case ETH_SS_STATS:
6843                 return BNX2_NUM_STATS;
6844         default:
6845                 return -EOPNOTSUPP;
6846         }
6847 }
6848
/* Run the ethtool self tests.  buf[] entries are set non-zero on
 * failure, in the order of bnx2_tests_str_arr[]: 0 = registers,
 * 1 = memory, 2 = loopback (offline only), 3 = nvram, 4 = interrupt,
 * 5 = link.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need the NIC quiesced and the chip reset
		 * into diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback returns a bitmask of failed modes; store it
		 * directly as the result.
		 */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the device: full re-init if it was up, otherwise
		 * just leave the chip reset.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run regardless of the offline flag. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6904
6905 static void
6906 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6907 {
6908         switch (stringset) {
6909         case ETH_SS_STATS:
6910                 memcpy(buf, bnx2_stats_str_arr,
6911                         sizeof(bnx2_stats_str_arr));
6912                 break;
6913         case ETH_SS_TEST:
6914                 memcpy(buf, bnx2_tests_str_arr,
6915                         sizeof(bnx2_tests_str_arr));
6916                 break;
6917         }
6918 }
6919
6920 static void
6921 bnx2_get_ethtool_stats(struct net_device *dev,
6922                 struct ethtool_stats *stats, u64 *buf)
6923 {
6924         struct bnx2 *bp = netdev_priv(dev);
6925         int i;
6926         u32 *hw_stats = (u32 *) bp->stats_blk;
6927         u8 *stats_len_arr = NULL;
6928
6929         if (hw_stats == NULL) {
6930                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6931                 return;
6932         }
6933
6934         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6935             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6936             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6937             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6938                 stats_len_arr = bnx2_5706_stats_len_arr;
6939         else
6940                 stats_len_arr = bnx2_5708_stats_len_arr;
6941
6942         for (i = 0; i < BNX2_NUM_STATS; i++) {
6943                 if (stats_len_arr[i] == 0) {
6944                         /* skip this counter */
6945                         buf[i] = 0;
6946                         continue;
6947                 }
6948                 if (stats_len_arr[i] == 4) {
6949                         /* 4-byte counter */
6950                         buf[i] = (u64)
6951                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6952                         continue;
6953                 }
6954                 /* 8-byte counter */
6955                 buf[i] = (((u64) *(hw_stats +
6956                                         bnx2_stats_offset_arr[i])) << 32) +
6957                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6958         }
6959 }
6960
6961 static int
6962 bnx2_phys_id(struct net_device *dev, u32 data)
6963 {
6964         struct bnx2 *bp = netdev_priv(dev);
6965         int i;
6966         u32 save;
6967
6968         if (data == 0)
6969                 data = 2;
6970
6971         save = REG_RD(bp, BNX2_MISC_CFG);
6972         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6973
6974         for (i = 0; i < (data * 2); i++) {
6975                 if ((i % 2) == 0) {
6976                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6977                 }
6978                 else {
6979                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6980                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6981                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6982                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6983                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6984                                 BNX2_EMAC_LED_TRAFFIC);
6985                 }
6986                 msleep_interruptible(500);
6987                 if (signal_pending(current))
6988                         break;
6989         }
6990         REG_WR(bp, BNX2_EMAC_LED, 0);
6991         REG_WR(bp, BNX2_MISC_CFG, save);
6992         return 0;
6993 }
6994
6995 static int
6996 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6997 {
6998         struct bnx2 *bp = netdev_priv(dev);
6999
7000         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7001                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7002         else
7003                 return (ethtool_op_set_tx_csum(dev, data));
7004 }
7005
/* ethtool entry points exported by this driver (hooked up in probe). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7036
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  PHY
 * registers are accessed under phy_lock; devices whose PHY is managed
 * remotely (BNX2_PHY_FLAG_REMOTE_PHY_CAP) refuse direct MII access.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		/* PHY reads require the device to be up. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7090
7091 /* Called with rtnl_lock */
7092 static int
7093 bnx2_change_mac_addr(struct net_device *dev, void *p)
7094 {
7095         struct sockaddr *addr = p;
7096         struct bnx2 *bp = netdev_priv(dev);
7097
7098         if (!is_valid_ether_addr(addr->sa_data))
7099                 return -EINVAL;
7100
7101         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7102         if (netif_running(dev))
7103                 bnx2_set_mac_addr(bp);
7104
7105         return 0;
7106 }
7107
7108 /* Called with rtnl_lock */
7109 static int
7110 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7111 {
7112         struct bnx2 *bp = netdev_priv(dev);
7113
7114         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7115                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7116                 return -EINVAL;
7117
7118         dev->mtu = new_mtu;
7119         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7120 }
7121
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler with the IRQ masked. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
7133
/* Determine whether a 5709 port is copper or SerDes, from the
 * dual-media bond id or, failing that, the PHY strap settings, and set
 * BNX2_PHY_FLAG_SERDES accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "_C" keeps the default (copper); "_S" selects SerDes. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Otherwise consult the strap, preferring a software override
	 * when one is in effect.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that select SerDes differ per PCI function.
	 * NOTE(review): these tables are chip-specific — confirm against
	 * the 5709 data sheet before changing.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7171
/* Detect the PCI/PCI-X bus type, nominal speed, and width from the
 * chip's status registers and record them in bp->flags and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* On PCI-X, map the detected clock-speed field to a
		 * nominal bus frequency in MHz.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: only 33 vs 66 MHz, from the M66EN status bit. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	/* Record a 32-bit (vs 64-bit) bus detection. */
	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7223
7224 static int __devinit
7225 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7226 {
7227         struct bnx2 *bp;
7228         unsigned long mem_len;
7229         int rc, i, j;
7230         u32 reg;
7231         u64 dma_mask, persist_dma_mask;
7232
7233         SET_NETDEV_DEV(dev, &pdev->dev);
7234         bp = netdev_priv(dev);
7235
7236         bp->flags = 0;
7237         bp->phy_flags = 0;
7238
7239         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7240         rc = pci_enable_device(pdev);
7241         if (rc) {
7242                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7243                 goto err_out;
7244         }
7245
7246         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7247                 dev_err(&pdev->dev,
7248                         "Cannot find PCI device base address, aborting.\n");
7249                 rc = -ENODEV;
7250                 goto err_out_disable;
7251         }
7252
7253         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7254         if (rc) {
7255                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7256                 goto err_out_disable;
7257         }
7258
7259         pci_set_master(pdev);
7260         pci_save_state(pdev);
7261
7262         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7263         if (bp->pm_cap == 0) {
7264                 dev_err(&pdev->dev,
7265                         "Cannot find power management capability, aborting.\n");
7266                 rc = -EIO;
7267                 goto err_out_release;
7268         }
7269
7270         bp->dev = dev;
7271         bp->pdev = pdev;
7272
7273         spin_lock_init(&bp->phy_lock);
7274         spin_lock_init(&bp->indirect_lock);
7275         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7276
7277         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7278         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7279         dev->mem_end = dev->mem_start + mem_len;
7280         dev->irq = pdev->irq;
7281
7282         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7283
7284         if (!bp->regview) {
7285                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7286                 rc = -ENOMEM;
7287                 goto err_out_release;
7288         }
7289
7290         /* Configure byte swap and enable write to the reg_window registers.
7291          * Rely on CPU to do target byte swapping on big endian systems
7292          * The chip's target access swapping will not swap all accesses
7293          */
7294         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7295                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7296                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7297
7298         bnx2_set_power_state(bp, PCI_D0);
7299
7300         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7301
7302         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7303                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7304                         dev_err(&pdev->dev,
7305                                 "Cannot find PCIE capability, aborting.\n");
7306                         rc = -EIO;
7307                         goto err_out_unmap;
7308                 }
7309                 bp->flags |= BNX2_FLAG_PCIE;
7310                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7311                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7312         } else {
7313                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7314                 if (bp->pcix_cap == 0) {
7315                         dev_err(&pdev->dev,
7316                                 "Cannot find PCIX capability, aborting.\n");
7317                         rc = -EIO;
7318                         goto err_out_unmap;
7319                 }
7320         }
7321
7322         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7323                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7324                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7325         }
7326
7327         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7328                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7329                         bp->flags |= BNX2_FLAG_MSI_CAP;
7330         }
7331
7332         /* 5708 cannot support DMA addresses > 40-bit.  */
7333         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7334                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7335         else
7336                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7337
7338         /* Configure DMA attributes. */
7339         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7340                 dev->features |= NETIF_F_HIGHDMA;
7341                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7342                 if (rc) {
7343                         dev_err(&pdev->dev,
7344                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7345                         goto err_out_unmap;
7346                 }
7347         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7348                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7349                 goto err_out_unmap;
7350         }
7351
7352         if (!(bp->flags & BNX2_FLAG_PCIE))
7353                 bnx2_get_pci_speed(bp);
7354
7355         /* 5706A0 may falsely detect SERR and PERR. */
7356         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7357                 reg = REG_RD(bp, PCI_COMMAND);
7358                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7359                 REG_WR(bp, PCI_COMMAND, reg);
7360         }
7361         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7362                 !(bp->flags & BNX2_FLAG_PCIX)) {
7363
7364                 dev_err(&pdev->dev,
7365                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7366                 goto err_out_unmap;
7367         }
7368
7369         bnx2_init_nvram(bp);
7370
7371         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7372
7373         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7374             BNX2_SHM_HDR_SIGNATURE_SIG) {
7375                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7376
7377                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7378         } else
7379                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7380
7381         /* Get the permanent MAC address.  First we need to make sure the
7382          * firmware is actually running.
7383          */
7384         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7385
7386         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7387             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7388                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7389                 rc = -ENODEV;
7390                 goto err_out_unmap;
7391         }
7392
7393         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7394         for (i = 0, j = 0; i < 3; i++) {
7395                 u8 num, k, skip0;
7396
7397                 num = (u8) (reg >> (24 - (i * 8)));
7398                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7399                         if (num >= k || !skip0 || k == 1) {
7400                                 bp->fw_version[j++] = (num / k) + '0';
7401                                 skip0 = 0;
7402                         }
7403                 }
7404                 if (i != 2)
7405                         bp->fw_version[j++] = '.';
7406         }
7407         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7408         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7409                 bp->wol = 1;
7410
7411         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7412                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7413
7414                 for (i = 0; i < 30; i++) {
7415                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7416                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7417                                 break;
7418                         msleep(10);
7419                 }
7420         }
7421         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7422         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7423         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7424             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7425                 int i;
7426                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7427
7428                 bp->fw_version[j++] = ' ';
7429                 for (i = 0; i < 3; i++) {
7430                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7431                         reg = swab32(reg);
7432                         memcpy(&bp->fw_version[j], &reg, 4);
7433                         j += 4;
7434                 }
7435         }
7436
7437         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7438         bp->mac_addr[0] = (u8) (reg >> 8);
7439         bp->mac_addr[1] = (u8) reg;
7440
7441         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7442         bp->mac_addr[2] = (u8) (reg >> 24);
7443         bp->mac_addr[3] = (u8) (reg >> 16);
7444         bp->mac_addr[4] = (u8) (reg >> 8);
7445         bp->mac_addr[5] = (u8) reg;
7446
7447         bp->tx_ring_size = MAX_TX_DESC_CNT;
7448         bnx2_set_rx_ring_size(bp, 255);
7449
7450         bp->rx_csum = 1;
7451
7452         bp->tx_quick_cons_trip_int = 20;
7453         bp->tx_quick_cons_trip = 20;
7454         bp->tx_ticks_int = 80;
7455         bp->tx_ticks = 80;
7456
7457         bp->rx_quick_cons_trip_int = 6;
7458         bp->rx_quick_cons_trip = 6;
7459         bp->rx_ticks_int = 18;
7460         bp->rx_ticks = 18;
7461
7462         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7463
7464         bp->timer_interval =  HZ;
7465         bp->current_interval =  HZ;
7466
7467         bp->phy_addr = 1;
7468
7469         /* Disable WOL support if we are running on a SERDES chip. */
7470         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7471                 bnx2_get_5709_media(bp);
7472         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7473                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7474
7475         bp->phy_port = PORT_TP;
7476         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7477                 bp->phy_port = PORT_FIBRE;
7478                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7479                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7480                         bp->flags |= BNX2_FLAG_NO_WOL;
7481                         bp->wol = 0;
7482                 }
7483                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7484                         /* Don't do parallel detect on this board because of
7485                          * some board problems.  The link will not go down
7486                          * if we do parallel detect.
7487                          */
7488                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7489                             pdev->subsystem_device == 0x310c)
7490                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7491                 } else {
7492                         bp->phy_addr = 2;
7493                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7494                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7495                 }
7496         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7497                    CHIP_NUM(bp) == CHIP_NUM_5708)
7498                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7499         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7500                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7501                   CHIP_REV(bp) == CHIP_REV_Bx))
7502                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7503
7504         bnx2_init_fw_cap(bp);
7505
7506         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7507             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7508             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7509                 bp->flags |= BNX2_FLAG_NO_WOL;
7510                 bp->wol = 0;
7511         }
7512
7513         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7514                 bp->tx_quick_cons_trip_int =
7515                         bp->tx_quick_cons_trip;
7516                 bp->tx_ticks_int = bp->tx_ticks;
7517                 bp->rx_quick_cons_trip_int =
7518                         bp->rx_quick_cons_trip;
7519                 bp->rx_ticks_int = bp->rx_ticks;
7520                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7521                 bp->com_ticks_int = bp->com_ticks;
7522                 bp->cmd_ticks_int = bp->cmd_ticks;
7523         }
7524
7525         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7526          *
7527          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7528          * with byte enables disabled on the unused 32-bit word.  This is legal
7529          * but causes problems on the AMD 8132 which will eventually stop
7530          * responding after a while.
7531          *
7532          * AMD believes this incompatibility is unique to the 5706, and
7533          * prefers to locally disable MSI rather than globally disabling it.
7534          */
7535         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7536                 struct pci_dev *amd_8132 = NULL;
7537
7538                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7539                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7540                                                   amd_8132))) {
7541
7542                         if (amd_8132->revision >= 0x10 &&
7543                             amd_8132->revision <= 0x13) {
7544                                 disable_msi = 1;
7545                                 pci_dev_put(amd_8132);
7546                                 break;
7547                         }
7548                 }
7549         }
7550
7551         bnx2_set_default_link(bp);
7552         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7553
7554         init_timer(&bp->timer);
7555         bp->timer.expires = RUN_AT(bp->timer_interval);
7556         bp->timer.data = (unsigned long) bp;
7557         bp->timer.function = bnx2_timer;
7558
7559         return 0;
7560
7561 err_out_unmap:
7562         if (bp->regview) {
7563                 iounmap(bp->regview);
7564                 bp->regview = NULL;
7565         }
7566
7567 err_out_release:
7568         pci_release_regions(pdev);
7569
7570 err_out_disable:
7571         pci_disable_device(pdev);
7572         pci_set_drvdata(pdev, NULL);
7573
7574 err_out:
7575         return rc;
7576 }
7577
7578 static char * __devinit
7579 bnx2_bus_string(struct bnx2 *bp, char *str)
7580 {
7581         char *s = str;
7582
7583         if (bp->flags & BNX2_FLAG_PCIE) {
7584                 s += sprintf(s, "PCI Express");
7585         } else {
7586                 s += sprintf(s, "PCI");
7587                 if (bp->flags & BNX2_FLAG_PCIX)
7588                         s += sprintf(s, "-X");
7589                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7590                         s += sprintf(s, " 32-bit");
7591                 else
7592                         s += sprintf(s, " 64-bit");
7593                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7594         }
7595         return str;
7596 }
7597
7598 static void __devinit
7599 bnx2_init_napi(struct bnx2 *bp)
7600 {
7601         int i;
7602
7603         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7604                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7605                 int (*poll)(struct napi_struct *, int);
7606
7607                 if (i == 0)
7608                         poll = bnx2_poll;
7609                 else
7610                         poll = bnx2_poll_msix;
7611
7612                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7613                 bnapi->bp = bp;
7614         }
7615 }
7616
/* Probe one NX2 device: allocate the net_device, initialize the board,
 * wire up the net_device operations and feature flags, and register
 * with the networking core.  Returns 0 or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Maps registers, reads shared memory, sets up bp defaults. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Undo everything bnx2_init_board() set up. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7704
7705 static void __devexit
7706 bnx2_remove_one(struct pci_dev *pdev)
7707 {
7708         struct net_device *dev = pci_get_drvdata(pdev);
7709         struct bnx2 *bp = netdev_priv(dev);
7710
7711         flush_scheduled_work();
7712
7713         unregister_netdev(dev);
7714
7715         if (bp->regview)
7716                 iounmap(bp->regview);
7717
7718         free_netdev(dev);
7719         pci_release_regions(pdev);
7720         pci_disable_device(pdev);
7721         pci_set_drvdata(pdev, NULL);
7722 }
7723
7724 static int
7725 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7726 {
7727         struct net_device *dev = pci_get_drvdata(pdev);
7728         struct bnx2 *bp = netdev_priv(dev);
7729         u32 reset_code;
7730
7731         /* PCI register 4 needs to be saved whether netif_running() or not.
7732          * MSI address and data need to be saved if using MSI and
7733          * netif_running().
7734          */
7735         pci_save_state(pdev);
7736         if (!netif_running(dev))
7737                 return 0;
7738
7739         flush_scheduled_work();
7740         bnx2_netif_stop(bp);
7741         netif_device_detach(dev);
7742         del_timer_sync(&bp->timer);
7743         if (bp->flags & BNX2_FLAG_NO_WOL)
7744                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7745         else if (bp->wol)
7746                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7747         else
7748                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7749         bnx2_reset_chip(bp, reset_code);
7750         bnx2_free_skbs(bp);
7751         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7752         return 0;
7753 }
7754
7755 static int
7756 bnx2_resume(struct pci_dev *pdev)
7757 {
7758         struct net_device *dev = pci_get_drvdata(pdev);
7759         struct bnx2 *bp = netdev_priv(dev);
7760
7761         pci_restore_state(pdev);
7762         if (!netif_running(dev))
7763                 return 0;
7764
7765         bnx2_set_power_state(bp, PCI_D0);
7766         netif_device_attach(dev);
7767         bnx2_init_nic(bp, 1);
7768         bnx2_netif_start(bp);
7769         return 0;
7770 }
7771
7772 /**
7773  * bnx2_io_error_detected - called when PCI error is detected
7774  * @pdev: Pointer to PCI device
7775  * @state: The current pci connection state
7776  *
7777  * This function is called after a PCI bus error affecting
7778  * this device has been detected.
7779  */
7780 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7781                                                pci_channel_state_t state)
7782 {
7783         struct net_device *dev = pci_get_drvdata(pdev);
7784         struct bnx2 *bp = netdev_priv(dev);
7785
7786         rtnl_lock();
7787         netif_device_detach(dev);
7788
7789         if (netif_running(dev)) {
7790                 bnx2_netif_stop(bp);
7791                 del_timer_sync(&bp->timer);
7792                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7793         }
7794
7795         pci_disable_device(pdev);
7796         rtnl_unlock();
7797
7798         /* Request a slot slot reset. */
7799         return PCI_ERS_RESULT_NEED_RESET;
7800 }
7801
7802 /**
7803  * bnx2_io_slot_reset - called after the pci bus has been reset.
7804  * @pdev: Pointer to PCI device
7805  *
7806  * Restart the card from scratch, as if from a cold-boot.
7807  */
7808 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7809 {
7810         struct net_device *dev = pci_get_drvdata(pdev);
7811         struct bnx2 *bp = netdev_priv(dev);
7812
7813         rtnl_lock();
7814         if (pci_enable_device(pdev)) {
7815                 dev_err(&pdev->dev,
7816                         "Cannot re-enable PCI device after reset.\n");
7817                 rtnl_unlock();
7818                 return PCI_ERS_RESULT_DISCONNECT;
7819         }
7820         pci_set_master(pdev);
7821         pci_restore_state(pdev);
7822
7823         if (netif_running(dev)) {
7824                 bnx2_set_power_state(bp, PCI_D0);
7825                 bnx2_init_nic(bp, 1);
7826         }
7827
7828         rtnl_unlock();
7829         return PCI_ERS_RESULT_RECOVERED;
7830 }
7831
7832 /**
7833  * bnx2_io_resume - called when traffic can start flowing again.
7834  * @pdev: Pointer to PCI device
7835  *
7836  * This callback is called when the error recovery driver tells us that
7837  * its OK to resume normal operation.
7838  */
7839 static void bnx2_io_resume(struct pci_dev *pdev)
7840 {
7841         struct net_device *dev = pci_get_drvdata(pdev);
7842         struct bnx2 *bp = netdev_priv(dev);
7843
7844         rtnl_lock();
7845         if (netif_running(dev))
7846                 bnx2_netif_start(bp);
7847
7848         netif_device_attach(dev);
7849         rtnl_unlock();
7850 }
7851
/* PCI error recovery (EEH/AER) callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected = bnx2_io_error_detected,
	.slot_reset     = bnx2_io_slot_reset,
	.resume         = bnx2_io_resume,
};
7857
/* PCI driver glue: probe/remove, power management, error recovery. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7867
/* Module load: register the PCI driver; devices probe via bnx2_init_one(). */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7872
/* Module unload: bnx2_remove_one() runs for each bound device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7877
7878 module_init(bnx2_init);
7879 module_exit(bnx2_cleanup);
7880
7881
7882