bnx2: Put tx ring variables in a separate struct (bnx2_tx_ring_info).
Source file: drivers/net/bnx2.c (linux-2.6 tree)
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.6"
60 #define DRV_MODULE_RELDATE      "May 16, 2008"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index: used as driver_data in bnx2_pci_tbl[] below and as the
 * index into board_info[] to look up the human-readable adapter name.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static struct {
	char *name;	/* marketing name, printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI IDs handled by this driver.  The HP OEM subsystem-ID entries must
 * come before the generic PCI_ANY_ID entries for the same device so they
 * match first.  The last field is the board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* Known NVRAM (flash/EEPROM) parts, matched at init time against the
 * strapping value read from the chip.  Each entry supplies the raw
 * config-register values followed by flags, page geometry, byte address
 * mask, total size, and a name printed at probe time (see struct
 * flash_spec in bnx2.h for the field layout).
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* 5709 chips always use this single buffered-flash description instead of
 * matching against flash_table[] above.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = txr->tx_prod - txr->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Write @val to a chip register indirectly through the PCI config window.
 * indirect_lock serializes the address/data register pair; the address
 * must be programmed before the data write.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
/* Write a word into the firmware shared-memory region (offset is relative
 * to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
273
/* Read a word from the firmware shared-memory region (offset is relative
 * to shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read PHY register @reg over MDIO into *@val.
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete within ~500us (in which case *val is set to 0).
 *
 * If the EMAC is auto-polling the PHY, polling is temporarily disabled
 * around the manual transaction and re-enabled afterwards; the dummy
 * REG_RD after each mode write flushes the posted write before the
 * udelay settle time.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read: PHY address, register, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for BUSY to clear, then latch the data. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
361
/* Write @val to PHY register @reg over MDIO.
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete within ~500us.  Mirrors bnx2_read_phy(): EMAC auto-polling is
 * suspended around the manual transaction and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write: PHY address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
425 static void
426 bnx2_enable_int(struct bnx2 *bp)
427 {
428         int i;
429         struct bnx2_napi *bnapi;
430
431         for (i = 0; i < bp->irq_nvecs; i++) {
432                 bnapi = &bp->bnx2_napi[i];
433
434                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437                        bnapi->last_status_idx);
438
439                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441                        bnapi->last_status_idx);
442         }
443         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
444 }
445
/* Disable interrupts and wait for all in-flight handlers to finish.
 * intr_sem is bumped first so any handler that does run becomes a no-op;
 * it is decremented again in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
/* Quiesce the interface: disable and synchronize interrupts, stop NAPI
 * and the TX queue.  trans_start is refreshed so the watchdog does not
 * report a spurious TX timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
485
486 static void
487 bnx2_netif_start(struct bnx2 *bp)
488 {
489         if (atomic_dec_and_test(&bp->intr_sem)) {
490                 if (netif_running(bp->dev)) {
491                         netif_wake_queue(bp->dev);
492                         bnx2_napi_enable(bp);
493                         bnx2_enable_int(bp);
494                 }
495         }
496 }
497
498 static void
499 bnx2_free_tx_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->num_tx_rings; i++) {
504                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
505                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
506
507                 if (txr->tx_desc_ring) {
508                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
509                                             txr->tx_desc_ring,
510                                             txr->tx_desc_mapping);
511                         txr->tx_desc_ring = NULL;
512                 }
513                 kfree(txr->tx_buf_ring);
514                 txr->tx_buf_ring = NULL;
515         }
516 }
517
518 static int
519 bnx2_alloc_tx_mem(struct bnx2 *bp)
520 {
521         int i;
522
523         for (i = 0; i < bp->num_tx_rings; i++) {
524                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
525                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
526
527                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
528                 if (txr->tx_buf_ring == NULL)
529                         return -ENOMEM;
530
531                 txr->tx_desc_ring =
532                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
533                                              &txr->tx_desc_mapping);
534                 if (txr->tx_desc_ring == NULL)
535                         return -ENOMEM;
536         }
537         return 0;
538 }
539
540 static void
541 bnx2_free_mem(struct bnx2 *bp)
542 {
543         int i;
544
545         bnx2_free_tx_mem(bp);
546
547         for (i = 0; i < bp->ctx_pages; i++) {
548                 if (bp->ctx_blk[i]) {
549                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
550                                             bp->ctx_blk[i],
551                                             bp->ctx_blk_mapping[i]);
552                         bp->ctx_blk[i] = NULL;
553                 }
554         }
555         if (bp->status_blk) {
556                 pci_free_consistent(bp->pdev, bp->status_stats_size,
557                                     bp->status_blk, bp->status_blk_mapping);
558                 bp->status_blk = NULL;
559                 bp->stats_blk = NULL;
560         }
561         for (i = 0; i < bp->rx_max_ring; i++) {
562                 if (bp->rx_desc_ring[i])
563                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
564                                             bp->rx_desc_ring[i],
565                                             bp->rx_desc_mapping[i]);
566                 bp->rx_desc_ring[i] = NULL;
567         }
568         vfree(bp->rx_buf_ring);
569         bp->rx_buf_ring = NULL;
570         for (i = 0; i < bp->rx_max_pg_ring; i++) {
571                 if (bp->rx_pg_desc_ring[i])
572                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
573                                             bp->rx_pg_desc_ring[i],
574                                             bp->rx_pg_desc_mapping[i]);
575                 bp->rx_pg_desc_ring[i] = NULL;
576         }
577         if (bp->rx_pg_ring)
578                 vfree(bp->rx_pg_ring);
579         bp->rx_pg_ring = NULL;
580 }
581
/* Allocate all host memory the device needs: RX shadow/descriptor rings,
 * the combined status+statistics block, per-vector MSI-X status blocks,
 * 5709 context pages, and (last) the TX rings.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything allocated
 * so far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* The page ring (for jumbo frames) is optional. */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status block slot per MSI-X hardware vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block; MSI-X vectors 1..N get
	 * fixed-size slices carved out of the same allocation.
	 */
	bp->bnx2_napi[0].status_blk = bp->status_blk;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

			bnapi->status_blk_msix = (void *)
				((unsigned long) bp->status_blk +
				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s) in the same buffer. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host context memory, in page-sized
		 * DMA-coherent chunks.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
676
677 static void
678 bnx2_report_fw_link(struct bnx2 *bp)
679 {
680         u32 fw_link_status = 0;
681
682         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
683                 return;
684
685         if (bp->link_up) {
686                 u32 bmsr;
687
688                 switch (bp->line_speed) {
689                 case SPEED_10:
690                         if (bp->duplex == DUPLEX_HALF)
691                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
692                         else
693                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
694                         break;
695                 case SPEED_100:
696                         if (bp->duplex == DUPLEX_HALF)
697                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
698                         else
699                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
700                         break;
701                 case SPEED_1000:
702                         if (bp->duplex == DUPLEX_HALF)
703                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
704                         else
705                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
706                         break;
707                 case SPEED_2500:
708                         if (bp->duplex == DUPLEX_HALF)
709                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
710                         else
711                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
712                         break;
713                 }
714
715                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
716
717                 if (bp->autoneg) {
718                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
719
720                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
721                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
722
723                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
724                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
725                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
726                         else
727                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
728                 }
729         }
730         else
731                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
732
733         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
734 }
735
736 static char *
737 bnx2_xceiver_str(struct bnx2 *bp)
738 {
739         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
740                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
741                  "Copper"));
742 }
743
/* Log the link state (speed, duplex, flow control) and update the
 * carrier state, then forward the link state to the bootcode via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
780
/* Resolve the negotiated flow-control setting into bp->flow_ctrl.
 *
 * If speed or flow control was forced (not autonegotiated), the
 * requested setting is applied directly (full duplex only).  On 5708
 * SerDes the chip reports the resolved result in a status register.
 * Otherwise the result is derived from the local and link-partner
 * advertisements per the pause-resolution table in IEEE 802.3ab-1999
 * (Table 28B-3); 1000Base-X pause bits are first translated to their
 * copper ADVERTISE_PAUSE_* equivalents so one table applies to both.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Flow control is only defined for full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes: hardware already resolved pause. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Map 1000Base-X pause bits onto the copper bit layout. */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
856
857 static int
858 bnx2_5709s_linkup(struct bnx2 *bp)
859 {
860         u32 val, speed;
861
862         bp->link_up = 1;
863
864         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
865         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
866         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
867
868         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
869                 bp->line_speed = bp->req_line_speed;
870                 bp->duplex = bp->req_duplex;
871                 return 0;
872         }
873         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
874         switch (speed) {
875                 case MII_BNX2_GP_TOP_AN_SPEED_10:
876                         bp->line_speed = SPEED_10;
877                         break;
878                 case MII_BNX2_GP_TOP_AN_SPEED_100:
879                         bp->line_speed = SPEED_100;
880                         break;
881                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
882                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
883                         bp->line_speed = SPEED_1000;
884                         break;
885                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
886                         bp->line_speed = SPEED_2500;
887                         break;
888         }
889         if (val & MII_BNX2_GP_TOP_AN_FD)
890                 bp->duplex = DUPLEX_FULL;
891         else
892                 bp->duplex = DUPLEX_HALF;
893         return 0;
894 }
895
896 static int
897 bnx2_5708s_linkup(struct bnx2 *bp)
898 {
899         u32 val;
900
901         bp->link_up = 1;
902         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
903         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
904                 case BCM5708S_1000X_STAT1_SPEED_10:
905                         bp->line_speed = SPEED_10;
906                         break;
907                 case BCM5708S_1000X_STAT1_SPEED_100:
908                         bp->line_speed = SPEED_100;
909                         break;
910                 case BCM5708S_1000X_STAT1_SPEED_1G:
911                         bp->line_speed = SPEED_1000;
912                         break;
913                 case BCM5708S_1000X_STAT1_SPEED_2G5:
914                         bp->line_speed = SPEED_2500;
915                         break;
916         }
917         if (val & BCM5708S_1000X_STAT1_FD)
918                 bp->duplex = DUPLEX_FULL;
919         else
920                 bp->duplex = DUPLEX_HALF;
921
922         return 0;
923 }
924
925 static int
926 bnx2_5706s_linkup(struct bnx2 *bp)
927 {
928         u32 bmcr, local_adv, remote_adv, common;
929
930         bp->link_up = 1;
931         bp->line_speed = SPEED_1000;
932
933         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
934         if (bmcr & BMCR_FULLDPLX) {
935                 bp->duplex = DUPLEX_FULL;
936         }
937         else {
938                 bp->duplex = DUPLEX_HALF;
939         }
940
941         if (!(bmcr & BMCR_ANENABLE)) {
942                 return 0;
943         }
944
945         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
946         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
947
948         common = local_adv & remote_adv;
949         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
950
951                 if (common & ADVERTISE_1000XFULL) {
952                         bp->duplex = DUPLEX_FULL;
953                 }
954                 else {
955                         bp->duplex = DUPLEX_HALF;
956                 }
957         }
958
959         return 0;
960 }
961
962 static int
963 bnx2_copper_linkup(struct bnx2 *bp)
964 {
965         u32 bmcr;
966
967         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
968         if (bmcr & BMCR_ANENABLE) {
969                 u32 local_adv, remote_adv, common;
970
971                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
972                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
973
974                 common = local_adv & (remote_adv >> 2);
975                 if (common & ADVERTISE_1000FULL) {
976                         bp->line_speed = SPEED_1000;
977                         bp->duplex = DUPLEX_FULL;
978                 }
979                 else if (common & ADVERTISE_1000HALF) {
980                         bp->line_speed = SPEED_1000;
981                         bp->duplex = DUPLEX_HALF;
982                 }
983                 else {
984                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
985                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
986
987                         common = local_adv & remote_adv;
988                         if (common & ADVERTISE_100FULL) {
989                                 bp->line_speed = SPEED_100;
990                                 bp->duplex = DUPLEX_FULL;
991                         }
992                         else if (common & ADVERTISE_100HALF) {
993                                 bp->line_speed = SPEED_100;
994                                 bp->duplex = DUPLEX_HALF;
995                         }
996                         else if (common & ADVERTISE_10FULL) {
997                                 bp->line_speed = SPEED_10;
998                                 bp->duplex = DUPLEX_FULL;
999                         }
1000                         else if (common & ADVERTISE_10HALF) {
1001                                 bp->line_speed = SPEED_10;
1002                                 bp->duplex = DUPLEX_HALF;
1003                         }
1004                         else {
1005                                 bp->line_speed = 0;
1006                                 bp->link_up = 0;
1007                         }
1008                 }
1009         }
1010         else {
1011                 if (bmcr & BMCR_SPEED100) {
1012                         bp->line_speed = SPEED_100;
1013                 }
1014                 else {
1015                         bp->line_speed = SPEED_10;
1016                 }
1017                 if (bmcr & BMCR_FULLDPLX) {
1018                         bp->duplex = DUPLEX_FULL;
1019                 }
1020                 else {
1021                         bp->duplex = DUPLEX_HALF;
1022                 }
1023         }
1024
1025         return 0;
1026 }
1027
/* Program the type/size word of RX context 0, including the 5709's
 * flow-control watermarks derived from the current pause settings and
 * RX ring size.
 */
static void
bnx2_init_rx_context0(struct bnx2 *bp)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Use the default low watermark only when TX pause is
		 * enabled; otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Low mark must stay below the ring size. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* The marks are only meaningful when hi > lo. */
		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale both marks into the units the context expects. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; a zero high mark also
		 * forces the low mark off.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1063
/* Program the EMAC to match the resolved link state: inter-packet gap,
 * port mode, duplex, and RX/TX pause enables.  Called after every link
 * (re)resolution.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Use larger TX length/IPG values for half-duplex gigabit. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* The 5706 has no dedicated 10M mode;
				 * it falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* The 5709 RX context watermarks depend on flow_ctrl; refresh. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_rx_context0(bp);

	return 0;
}
1133
1134 static void
1135 bnx2_enable_bmsr1(struct bnx2 *bp)
1136 {
1137         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1138             (CHIP_NUM(bp) == CHIP_NUM_5709))
1139                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1140                                MII_BNX2_BLK_ADDR_GP_STATUS);
1141 }
1142
1143 static void
1144 bnx2_disable_bmsr1(struct bnx2 *bp)
1145 {
1146         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1147             (CHIP_NUM(bp) == CHIP_NUM_5709))
1148                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1149                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1150 }
1151
1152 static int
1153 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1154 {
1155         u32 up1;
1156         int ret = 1;
1157
1158         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1159                 return 0;
1160
1161         if (bp->autoneg & AUTONEG_SPEED)
1162                 bp->advertising |= ADVERTISED_2500baseX_Full;
1163
1164         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1165                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1166
1167         bnx2_read_phy(bp, bp->mii_up1, &up1);
1168         if (!(up1 & BCM5708S_UP1_2G5)) {
1169                 up1 |= BCM5708S_UP1_2G5;
1170                 bnx2_write_phy(bp, bp->mii_up1, up1);
1171                 ret = 0;
1172         }
1173
1174         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1175                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177
1178         return ret;
1179 }
1180
1181 static int
1182 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1183 {
1184         u32 up1;
1185         int ret = 0;
1186
1187         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1188                 return 0;
1189
1190         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1191                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1192
1193         bnx2_read_phy(bp, bp->mii_up1, &up1);
1194         if (up1 & BCM5708S_UP1_2G5) {
1195                 up1 &= ~BCM5708S_UP1_2G5;
1196                 bnx2_write_phy(bp, bp->mii_up1, up1);
1197                 ret = 1;
1198         }
1199
1200         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1201                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1202                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1203
1204         return ret;
1205 }
1206
1207 static void
1208 bnx2_enable_forced_2g5(struct bnx2 *bp)
1209 {
1210         u32 bmcr;
1211
1212         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1213                 return;
1214
1215         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1216                 u32 val;
1217
1218                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1219                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1220                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1221                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1222                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1223                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1224
1225                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1226                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1227                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1228
1229         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1230                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1231                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1232         }
1233
1234         if (bp->autoneg & AUTONEG_SPEED) {
1235                 bmcr &= ~BMCR_ANENABLE;
1236                 if (bp->req_duplex == DUPLEX_FULL)
1237                         bmcr |= BMCR_FULLDPLX;
1238         }
1239         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1240 }
1241
1242 static void
1243 bnx2_disable_forced_2g5(struct bnx2 *bp)
1244 {
1245         u32 bmcr;
1246
1247         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1248                 return;
1249
1250         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1251                 u32 val;
1252
1253                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1254                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1255                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1256                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1257                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1258
1259                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1260                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1261                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1262
1263         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1264                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1265                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1266         }
1267
1268         if (bp->autoneg & AUTONEG_SPEED)
1269                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1270         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1271 }
1272
1273 static void
1274 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1275 {
1276         u32 val;
1277
1278         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1279         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1280         if (start)
1281                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1282         else
1283                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1284 }
1285
/* Re-resolve link state from the PHY and reprogram the MAC to match.
 * NOTE(review): callers appear to hold phy_lock around this — confirm.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode, just report link up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* The firmware owns the link when a remote PHY is in use. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link bit is latched, so the first read
	 * may report a stale link-down event.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release any previously forced link-down state. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Double-read of the AN debug shadow for the same
		 * latched-bit reason as BMSR above.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On the 5706S, override BMSR with the EMAC link status
		 * combined with the no-sync indication.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop forced 2.5G so autoneg can proceed. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leaving parallel-detect state: re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Log only actual link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1369
1370 static int
1371 bnx2_reset_phy(struct bnx2 *bp)
1372 {
1373         int i;
1374         u32 reg;
1375
1376         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1377
1378 #define PHY_RESET_MAX_WAIT 100
1379         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1380                 udelay(10);
1381
1382                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1383                 if (!(reg & BMCR_RESET)) {
1384                         udelay(20);
1385                         break;
1386                 }
1387         }
1388         if (i == PHY_RESET_MAX_WAIT) {
1389                 return -EBUSY;
1390         }
1391         return 0;
1392 }
1393
1394 static u32
1395 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1396 {
1397         u32 adv = 0;
1398
1399         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1400                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1401
1402                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1403                         adv = ADVERTISE_1000XPAUSE;
1404                 }
1405                 else {
1406                         adv = ADVERTISE_PAUSE_CAP;
1407                 }
1408         }
1409         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1410                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1411                         adv = ADVERTISE_1000XPSE_ASYM;
1412                 }
1413                 else {
1414                         adv = ADVERTISE_PAUSE_ASYM;
1415                 }
1416         }
1417         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1418                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1419                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1420                 }
1421                 else {
1422                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1423                 }
1424         }
1425         return adv;
1426 }
1427
1428 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1429
/* Hand the requested link settings to the management firmware, which
 * owns the (remote) PHY.  phy_lock is dropped around the firmware
 * handshake.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: translate the ethtool advertising mask into
		 * the firmware's netlink speed bits.
		 */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Translate the pause advertisement into firmware flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake can sleep; drop phy_lock meanwhile. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1486
/* Configure the SerDes PHY for the requested speed/duplex/autoneg
 * settings.  Delegates to the firmware when a remote PHY manages the
 * link.  Called with phy_lock held (released briefly around msleep).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G enable requires a link restart. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is the extra BMCR speed bit used
				 * while 2.5G was forced — presumably must
				 * be cleared for 1G; confirm against the
				 * 5709 docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve pause + MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only when the advertisement changed or
	 * autoneg was off.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1601
/* Ethtool advertisement masks covering every speed the PHY type
 * supports (the fibre mask depends on bp->phy_flags, so it must be
 * expanded where a local "bp" is in scope).
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for copper autoneg. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1616
1617 static void
1618 bnx2_set_default_remote_link(struct bnx2 *bp)
1619 {
1620         u32 link;
1621
1622         if (bp->phy_port == PORT_TP)
1623                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1624         else
1625                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1626
1627         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1628                 bp->req_line_speed = 0;
1629                 bp->autoneg |= AUTONEG_SPEED;
1630                 bp->advertising = ADVERTISED_Autoneg;
1631                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1632                         bp->advertising |= ADVERTISED_10baseT_Half;
1633                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1634                         bp->advertising |= ADVERTISED_10baseT_Full;
1635                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1636                         bp->advertising |= ADVERTISED_100baseT_Half;
1637                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1638                         bp->advertising |= ADVERTISED_100baseT_Full;
1639                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1640                         bp->advertising |= ADVERTISED_1000baseT_Full;
1641                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1642                         bp->advertising |= ADVERTISED_2500baseX_Full;
1643         } else {
1644                 bp->autoneg = 0;
1645                 bp->advertising = 0;
1646                 bp->req_duplex = DUPLEX_FULL;
1647                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1648                         bp->req_line_speed = SPEED_10;
1649                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1650                                 bp->req_duplex = DUPLEX_HALF;
1651                 }
1652                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1653                         bp->req_line_speed = SPEED_100;
1654                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1655                                 bp->req_duplex = DUPLEX_HALF;
1656                 }
1657                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1658                         bp->req_line_speed = SPEED_1000;
1659                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1660                         bp->req_line_speed = SPEED_2500;
1661         }
1662 }
1663
1664 static void
1665 bnx2_set_default_link(struct bnx2 *bp)
1666 {
1667         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1668                 bnx2_set_default_remote_link(bp);
1669                 return;
1670         }
1671
1672         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1673         bp->req_line_speed = 0;
1674         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675                 u32 reg;
1676
1677                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1678
1679                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1680                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1681                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1682                         bp->autoneg = 0;
1683                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1684                         bp->req_duplex = DUPLEX_FULL;
1685                 }
1686         } else
1687                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1688 }
1689
/* Answer the firmware's heartbeat by writing the next driver pulse
 * sequence number to the shared-memory pulse mailbox.  The write goes
 * through the PCICFG register window, so indirect_lock must serialize
 * the window-address/window-data register pair.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Sequence number wraps within BNX2_DRV_PULSE_SEQ_MASK. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1703
/* Handle a link-status event reported by the firmware for a remote PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp's link state
 * (link_up, line_speed, duplex, flow_ctrl, phy_port) and reprograms the
 * MAC accordingly.  Also services an embedded heartbeat request if the
 * firmware flagged one in the same word.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember old state for reporting */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The HALF cases only override the duplex and then fall
		 * through to the matching FULL case to pick up the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the media type changed, reload the default link
		 * configuration for the new port type.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1780
1781 static int
1782 bnx2_set_remote_link(struct bnx2 *bp)
1783 {
1784         u32 evt_code;
1785
1786         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1787         switch (evt_code) {
1788                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1789                         bnx2_remote_phy_event(bp);
1790                         break;
1791                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1792                 default:
1793                         bnx2_send_heart_beat(bp);
1794                         break;
1795         }
1796         return 0;
1797 }
1798
/* Program the copper PHY according to the requested link parameters.
 * In autoneg mode the advertisement registers are rewritten and autoneg
 * restarted only if something actually changed; in forced mode BMCR is
 * programmed directly, bouncing the link if it is currently up so the
 * partner sees the change.
 *
 * NOTE(review): bp->phy_lock appears to be held by the caller — it is
 * dropped and re-acquired around the msleep() below; confirm at call
 * sites.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask current advertisements down to the bits we manage
		 * so the comparison below is meaningful.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice for the
		 * current value.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1895
1896 static int
1897 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1898 {
1899         if (bp->loopback == MAC_LOOPBACK)
1900                 return 0;
1901
1902         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1903                 return (bnx2_setup_serdes_phy(bp, port));
1904         }
1905         else {
1906                 return (bnx2_setup_copper_phy(bp));
1907         }
1908 }
1909
/* One-time initialization of the 5709 SerDes PHY.  The 5709S exposes its
 * IEEE registers at an offset and uses a block-address scheme: each
 * MII_BNX2_BLK_ADDR write selects the register block that subsequent
 * accesses hit.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route register accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the device is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1959
/* One-time initialization of the BCM5708S SerDes PHY: fiber mode,
 * PLL-based early link detect, optional 2.5G advertisement, plus chip
 * revision and NVRAM-driven TX amplitude tweaks.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may carry a non-zero TX control value to apply on
	 * backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2017
/* One-time initialization of the 5706 SerDes PHY.  Applies a GP
 * hardware-control workaround on 5706 and programs extended packet
 * length support based on the current MTU.  The 0x18/0x1c accesses are
 * vendor shadow registers — presumably auxiliary/extended control;
 * exact semantics are not visible here.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2055
/* One-time initialization of the copper PHY: CRC workaround, early-DAC
 * disable, MTU-dependent extended packet length, and ethernet@wirespeed.
 * The raw 0x10/0x15/0x17/0x18 accesses are vendor shadow registers;
 * their bit meanings are not documented here.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-prescribed register sequence to work around a
		 * CRC problem on affected parts.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2107
2108
/* Top-level PHY initialization.  Sets default MII register offsets,
 * enables link attentions, reads the PHY ID, dispatches to the
 * chip-specific init routine (skipped entirely for remote PHYs managed
 * by firmware), then applies the link configuration via
 * bnx2_setup_phy().  Returns 0 or the first error encountered.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register map; chip-specific init may override
	 * these (e.g. the 5709S uses offset registers).
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote PHYs are owned by the firmware; no local init. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* PHY ID: high word from PHYSID1, low word from PHYSID2. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2152
2153 static int
2154 bnx2_set_mac_loopback(struct bnx2 *bp)
2155 {
2156         u32 mac_mode;
2157
2158         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2159         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2160         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2161         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2162         bp->link_up = 1;
2163         return 0;
2164 }
2165
2166 static int bnx2_test_link(struct bnx2 *);
2167
/* Put the PHY into loopback at 1000/full for self-test, wait up to one
 * second for the link to settle, and program the MAC for GMII with all
 * loopback/force bits cleared.  Returns the PHY write status; 0 on
 * success.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY accesses are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give up silently after 10 x 100ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2197
/* Synchronous driver/firmware mailbox handshake.  Posts msg_data
 * (tagged with the next sequence number) to the driver mailbox and
 * polls up to FW_ACK_TIME_OUT_MS for the firmware to echo the sequence
 * back.  WAIT0-class messages return immediately without checking the
 * ack.  Returns 0 on success, -EBUSY on timeout (after notifying the
 * firmware), or -EIO if the firmware reports a bad status.  Sleeps;
 * must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the ack can
	 * be matched unambiguously.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2240
/* Initialize the 5709's host-based context memory: trigger the
 * controller's context memory init, then load the physical address of
 * each pre-allocated context page into the chip's host page table,
 * polling for each write request to complete.  Returns 0 on success,
 * -EBUSY if the hardware does not respond in time, or -ENOMEM if a
 * context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size (log2, relative to 256 bytes). */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the chip to finish its internal memory init. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program one 64-bit page address, low half with the
		 * valid bit, then high half, then kick the write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the page-table write to be accepted. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2288
/* Zero the on-chip connection context memory for the 96 contexts used
 * on chips with internal context memory (pre-5709).  On 5706 A0 some
 * context IDs map to remapped physical locations, so the physical CID
 * is recomputed for the page-table programming there.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 silicon: CIDs with bit 3 set live at a
			 * remapped physical location.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans multiple physical pages; map and
		 * clear each one.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2331
2332 static int
2333 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2334 {
2335         u16 *good_mbuf;
2336         u32 good_mbuf_cnt;
2337         u32 val;
2338
2339         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2340         if (good_mbuf == NULL) {
2341                 printk(KERN_ERR PFX "Failed to allocate memory in "
2342                                     "bnx2_alloc_bad_rbuf\n");
2343                 return -ENOMEM;
2344         }
2345
2346         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2347                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2348
2349         good_mbuf_cnt = 0;
2350
2351         /* Allocate a bunch of mbufs and save the good ones in an array. */
2352         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2353         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2354                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2355                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2356
2357                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2358
2359                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2360
2361                 /* The addresses with Bit 9 set are bad memory blocks. */
2362                 if (!(val & (1 << 9))) {
2363                         good_mbuf[good_mbuf_cnt] = (u16) val;
2364                         good_mbuf_cnt++;
2365                 }
2366
2367                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2368         }
2369
2370         /* Free the good ones back to the mbuf pool thus discarding
2371          * all the bad ones. */
2372         while (good_mbuf_cnt) {
2373                 good_mbuf_cnt--;
2374
2375                 val = good_mbuf[good_mbuf_cnt];
2376                 val = (val << 9) | val | 1;
2377
2378                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2379         }
2380         kfree(good_mbuf);
2381         return 0;
2382 }
2383
2384 static void
2385 bnx2_set_mac_addr(struct bnx2 *bp)
2386 {
2387         u32 val;
2388         u8 *mac_addr = bp->dev->dev_addr;
2389
2390         val = (mac_addr[0] << 8) | mac_addr[1];
2391
2392         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2393
2394         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2395                 (mac_addr[4] << 8) | mac_addr[5];
2396
2397         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2398 }
2399
2400 static inline int
2401 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2402 {
2403         dma_addr_t mapping;
2404         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2405         struct rx_bd *rxbd =
2406                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2407         struct page *page = alloc_page(GFP_ATOMIC);
2408
2409         if (!page)
2410                 return -ENOMEM;
2411         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2412                                PCI_DMA_FROMDEVICE);
2413         rx_pg->page = page;
2414         pci_unmap_addr_set(rx_pg, mapping, mapping);
2415         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2416         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2417         return 0;
2418 }
2419
2420 static void
2421 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2422 {
2423         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2424         struct page *page = rx_pg->page;
2425
2426         if (!page)
2427                 return;
2428
2429         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2430                        PCI_DMA_FROMDEVICE);
2431
2432         __free_page(page);
2433         rx_pg->page = NULL;
2434 }
2435
/* Allocate and DMA-map one skb for the given RX ring slot, aligning its
 * data pointer and publishing the bus address to the hardware
 * descriptor.  Also advances the NAPI byte-sequence producer count.
 * Returns 0 on success or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Push skb->data up to the next BNX2_RX_ALIGN boundary if the
	 * allocator did not hand us an aligned buffer.
	 */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Shadow ring keeps the skb and its unmap cookie. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Producer byte sequence tells the chip how much buffer space
	 * has been posted.
	 */
	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2466
2467 static int
2468 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2469 {
2470         struct status_block *sblk = bnapi->status_blk;
2471         u32 new_link_state, old_link_state;
2472         int is_set = 1;
2473
2474         new_link_state = sblk->status_attn_bits & event;
2475         old_link_state = sblk->status_attn_bits_ack & event;
2476         if (new_link_state != old_link_state) {
2477                 if (new_link_state)
2478                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2479                 else
2480                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2481         } else
2482                 is_set = 0;
2483
2484         return is_set;
2485 }
2486
2487 static void
2488 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2489 {
2490         spin_lock(&bp->phy_lock);
2491
2492         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2493                 bnx2_set_link(bp);
2494         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2495                 bnx2_set_remote_link(bp);
2496
2497         spin_unlock(&bp->phy_lock);
2498
2499 }
2500
2501 static inline u16
2502 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2503 {
2504         u16 cons;
2505
2506         if (bnapi->int_num == 0)
2507                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2508         else
2509                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2510
2511         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2512                 cons++;
2513         return cons;
2514 }
2515
/* Reclaim completed TX descriptors, up to @budget packets (a budget of
 * 0 means no packet limit).  Unmaps each completed skb (head and page
 * fragments), frees it, records the new consumer indices, and wakes the
 * TX queue if it was stopped and enough descriptors are now free.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0;

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        /* Index just past the last BD of this packet. */
                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                /* Account for the skipped last slot of
                                 * the ring page (see
                                 * bnx2_get_hw_tx_cons()). */
                                last_idx++;
                        }
                        /* Wrap-safe signed compare: stop if the chip has
                         * not completed the whole packet yet. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each fragment page, one BD per fragment. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Pick up completions that arrived while we worked. */
                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                /* Re-check under the TX lock to close the race with a
                 * concurrent queue stop. */
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
        return tx_pkt;
}
2598
/* Recycle @count page buffers from the RX page ring's consumer side
 * back to the producer side without allocating new pages.  If @skb is
 * non-NULL, its last page fragment is first detached, re-mapped for the
 * device, returned to the consumer slot, and the skb is freed.  Updates
 * bnapi->rx_pg_prod and bnapi->rx_pg_cons when done.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = bnapi->rx_pg_prod, prod;
        u16 cons = bnapi->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &bp->rx_pg_ring[prod];
                cons_rx_pg = &bp->rx_pg_ring[cons];
                cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        /* Steal the last fragment page back from the skb
                         * and re-map it for device DMA. */
                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        /* Move page, unmap address, and BD bus address
                         * from the consumer slot to the producer slot. */
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        bnapi->rx_pg_prod = hw_prod;
        bnapi->rx_pg_cons = cons;
}
2648
/* Return the RX skb at ring slot @cons back to the ring at slot @prod
 * without freeing it (used on receive errors and after a copy-break).
 * Re-syncs the small header area back to the device, moves the
 * skb/mapping/BD address to the producer slot when the indices differ,
 * and advances rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
        u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        /* Only the header area was synced for the CPU in bnx2_rx_int();
         * hand that same range back to the device. */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        bnapi->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: skb pointer update above is all that is needed. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2678
/* Finish receiving one packet into @skb after the copy-break test
 * failed.  @ring_idx packs the producer index in the low 16 bits and
 * the consumer index in the high 16 bits.  The ring slot is replenished
 * first; if that fails the skb (and, for split packets, the page-ring
 * buffers it spans) are recycled and an error is returned.  When
 * @hdr_len is non-zero the rest of the frame lives in the page ring and
 * is attached to the skb as page fragments.  Returns 0 or -errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, bnapi, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* Also recycle the page-ring buffers this split
                         * packet would have consumed. */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
                }
                return err;
        }

        /* Skip the l2_fhdr area the chip places before the frame. */
        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Whole packet fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = bnapi->rx_pg_cons;
                u16 pg_prod = bnapi->rx_pg_prod;

                /* Bytes beyond the header live in the page ring; len
                 * excludes 4 trailing bytes already, so add them back
                 * to size the raw fragment data. */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* Remaining data is only (part of) the 4
                                 * trailing bytes: trim them off and give
                                 * the unused pages back to the ring. */
                                unsigned int tail = 4 - frag_len;

                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &bp->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* Do not include the 4 trailing bytes from the
                         * last page. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        /* Replenish the slot just consumed; on failure,
                         * undo by recycling the remaining pages
                         * (including the one just attached via @skb). */
                        err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                bnapi->rx_pg_prod = pg_prod;
                bnapi->rx_pg_cons = pg_cons;
        }
        return 0;
}
2770
2771 static inline u16
2772 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2773 {
2774         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2775
2776         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2777                 cons++;
2778         return cons;
2779 }
2780
/* Main RX processing loop, called from NAPI poll with at most @budget
 * packets.  For each completed descriptor: validate the hardware
 * l2_fhdr status, either copy small packets into a fresh skb
 * (copy-break) or hand the buffer up via bnx2_rx_skb(), apply checksum
 * and VLAN offload results, and pass the skb to the stack.  Finally
 * posts the new consumer index and byte sequence to the chip mailboxes.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = bnapi->rx_cons;
        sw_prod = bnapi->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the header area for the CPU; the rest is
                 * either unmapped in bnx2_rx_skb() or copied below. */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                /* The chip places an l2_fhdr before the frame data. */
                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;

                /* Recycle buffers for frames the hardware flagged bad. */
                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
                                          sw_ring_prod);
                        goto next_rx;
                }
                /* Non-zero hdr_len means the frame is split between the
                 * linear buffer and the page ring. */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* The reported length includes 4 trailing bytes that
                 * are not passed up to the stack. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        /* Copy-break: copy small packets into a fresh
                         * skb and recycle the large ring buffer. */
                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, bnapi, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless VLAN-tagged (0x8100 is
                 * the 802.1Q ethertype); tagged frames may legitimately
                 * exceed mtu + ETH_HLEN. */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only for TCP/UDP frames
                 * with no checksum error bits set. */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        bnapi->rx_cons = sw_cons;
        bnapi->rx_prod = sw_prod;

        /* Post the new producer positions to the chip mailboxes. */
        if (pg_ring_used)
                REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
                         bnapi->rx_pg_prod);

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2928
2929 /* MSI ISR - The only difference between this and the INTx ISR
2930  * is that the MSI interrupt is always serviced.
2931  */
2932 static irqreturn_t
2933 bnx2_msi(int irq, void *dev_instance)
2934 {
2935         struct net_device *dev = dev_instance;
2936         struct bnx2 *bp = netdev_priv(dev);
2937         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2938
2939         prefetch(bnapi->status_blk);
2940         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2941                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2942                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2943
2944         /* Return here if interrupt is disabled. */
2945         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2946                 return IRQ_HANDLED;
2947
2948         netif_rx_schedule(dev, &bnapi->napi);
2949
2950         return IRQ_HANDLED;
2951 }
2952
2953 static irqreturn_t
2954 bnx2_msi_1shot(int irq, void *dev_instance)
2955 {
2956         struct net_device *dev = dev_instance;
2957         struct bnx2 *bp = netdev_priv(dev);
2958         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2959
2960         prefetch(bnapi->status_blk);
2961
2962         /* Return here if interrupt is disabled. */
2963         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2964                 return IRQ_HANDLED;
2965
2966         netif_rx_schedule(dev, &bnapi->napi);
2967
2968         return IRQ_HANDLED;
2969 }
2970
/* INTx (and legacy shared) interrupt handler: decide whether the
 * interrupt is ours, mask and deassert it, and schedule NAPI with the
 * status index recorded for later acknowledgment.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct status_block *sblk = bnapi->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Mask further interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
3010
3011 static irqreturn_t
3012 bnx2_tx_msix(int irq, void *dev_instance)
3013 {
3014         struct net_device *dev = dev_instance;
3015         struct bnx2 *bp = netdev_priv(dev);
3016         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
3017
3018         prefetch(bnapi->status_blk_msix);
3019
3020         /* Return here if interrupt is disabled. */
3021         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3022                 return IRQ_HANDLED;
3023
3024         netif_rx_schedule(dev, &bnapi->napi);
3025         return IRQ_HANDLED;
3026 }
3027
3028 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3029                                  STATUS_ATTN_BITS_TIMER_ABORT)
3030
3031 static inline int
3032 bnx2_has_work(struct bnx2_napi *bnapi)
3033 {
3034         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3035         struct status_block *sblk = bnapi->status_blk;
3036
3037         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
3038             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3039                 return 1;
3040
3041         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3042             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3043                 return 1;
3044
3045         return 0;
3046 }
3047
/* NAPI poll handler for the TX-only MSI-X vector: reclaim TX
 * completions until the ring is caught up or @budget is spent, then
 * complete NAPI and re-arm the interrupt for this vector.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk_msix;

        do {
                work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
                if (unlikely(work_done >= budget))
                        return work_done;

                /* Record the status index before re-checking for work,
                 * so a completion posted after this point will re-raise
                 * the interrupt once it is unmasked below. */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
        } while (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons);

        netif_rx_complete(bp->dev, napi);
        /* Ack work up to last_status_idx and unmask this vector. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               bnapi->last_status_idx);
        return work_done;
}
3071
/* One round of NAPI work for the combined vector: service PHY
 * attention events, reclaim TX completions, and process RX packets with
 * the remaining budget.  Only RX packets count against @budget.
 * Returns the updated work_done total.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct status_block *sblk = bnapi->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        /* Budget 0 = unlimited: TX reclaim is not budget-bounded. */
        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);

        if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
3101
/* Main NAPI poll handler: loop doing work until @budget is spent or no
 * work remains, then complete NAPI and re-enable the interrupt,
 * acknowledging status up to the last observed status index.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: one ack write re-enables. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first ack with the mask bit still set,
                         * then write again without it to re-enable. */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3143
3144 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3145  * from set_multicast.
3146  */
/* Program the EMAC RX mode and multicast filters to match the device's
 * flags (promiscuous / all-multi / multicast list).
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the saved mode with the promiscuous and
         * keep-VLAN-tag bits cleared; they are re-added below. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in the frame only when no VLAN group is
         * registered and ASF management firmware is not active. */
        if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash each address into one of 256 filter bits
                         * (8 registers x 32 bits) via CRC32. */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the EMAC mode register when it actually changed. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, program, then re-enable the sort-user0 rule. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3218
3219 static void
3220 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3221         u32 rv2p_proc)
3222 {
3223         int i;
3224         u32 val;
3225
3226         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3227                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3228                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3229                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3230                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3231         }
3232
3233         for (i = 0; i < rv2p_code_len; i += 8) {
3234                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3235                 rv2p_code++;
3236                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3237                 rv2p_code++;
3238
3239                 if (rv2p_proc == RV2P_PROC1) {
3240                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3241                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3242                 }
3243                 else {
3244                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3245                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3246                 }
3247         }
3248
3249         /* Reset the processor, un-stall is done later. */
3250         if (rv2p_proc == RV2P_PROC1) {
3251                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3252         }
3253         else {
3254                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3255         }
3256 }
3257
3258 static int
3259 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3260 {
3261         u32 offset;
3262         u32 val;
3263         int rc;
3264
3265         /* Halt the CPU. */
3266         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3267         val |= cpu_reg->mode_value_halt;
3268         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3269         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3270
3271         /* Load the Text area. */
3272         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3273         if (fw->gz_text) {
3274                 int j;
3275
3276                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3277                                        fw->gz_text_len);
3278                 if (rc < 0)
3279                         return rc;
3280
3281                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3282                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3283                 }
3284         }
3285
3286         /* Load the Data area. */
3287         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3288         if (fw->data) {
3289                 int j;
3290
3291                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3292                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3293                 }
3294         }
3295
3296         /* Load the SBSS area. */
3297         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3298         if (fw->sbss_len) {
3299                 int j;
3300
3301                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3302                         bnx2_reg_wr_ind(bp, offset, 0);
3303                 }
3304         }
3305
3306         /* Load the BSS area. */
3307         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3308         if (fw->bss_len) {
3309                 int j;
3310
3311                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3312                         bnx2_reg_wr_ind(bp, offset, 0);
3313                 }
3314         }
3315
3316         /* Load the Read-Only area. */
3317         offset = cpu_reg->spad_base +
3318                 (fw->rodata_addr - cpu_reg->mips_view_base);
3319         if (fw->rodata) {
3320                 int j;
3321
3322                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3323                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3324                 }
3325         }
3326
3327         /* Clear the pre-fetch instruction. */
3328         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3329         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3330
3331         /* Start the CPU. */
3332         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3333         val &= ~cpu_reg->mode_value_halt;
3334         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3335         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3336
3337         return 0;
3338 }
3339
3340 static int
3341 bnx2_init_cpus(struct bnx2 *bp)
3342 {
3343         struct fw_info *fw;
3344         int rc, rv2p_len;
3345         void *text, *rv2p;
3346
3347         /* Initialize the RV2P processor. */
3348         text = vmalloc(FW_BUF_SIZE);
3349         if (!text)
3350                 return -ENOMEM;
3351         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3352                 rv2p = bnx2_xi_rv2p_proc1;
3353                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3354         } else {
3355                 rv2p = bnx2_rv2p_proc1;
3356                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3357         }
3358         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3359         if (rc < 0)
3360                 goto init_cpu_err;
3361
3362         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3363
3364         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3365                 rv2p = bnx2_xi_rv2p_proc2;
3366                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3367         } else {
3368                 rv2p = bnx2_rv2p_proc2;
3369                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3370         }
3371         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3372         if (rc < 0)
3373                 goto init_cpu_err;
3374
3375         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3376
3377         /* Initialize the RX Processor. */
3378         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3379                 fw = &bnx2_rxp_fw_09;
3380         else
3381                 fw = &bnx2_rxp_fw_06;
3382
3383         fw->text = text;
3384         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3385         if (rc)
3386                 goto init_cpu_err;
3387
3388         /* Initialize the TX Processor. */
3389         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3390                 fw = &bnx2_txp_fw_09;
3391         else
3392                 fw = &bnx2_txp_fw_06;
3393
3394         fw->text = text;
3395         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3396         if (rc)
3397                 goto init_cpu_err;
3398
3399         /* Initialize the TX Patch-up Processor. */
3400         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3401                 fw = &bnx2_tpat_fw_09;
3402         else
3403                 fw = &bnx2_tpat_fw_06;
3404
3405         fw->text = text;
3406         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3407         if (rc)
3408                 goto init_cpu_err;
3409
3410         /* Initialize the Completion Processor. */
3411         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3412                 fw = &bnx2_com_fw_09;
3413         else
3414                 fw = &bnx2_com_fw_06;
3415
3416         fw->text = text;
3417         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3418         if (rc)
3419                 goto init_cpu_err;
3420
3421         /* Initialize the Command Processor. */
3422         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3423                 fw = &bnx2_cp_fw_09;
3424         else
3425                 fw = &bnx2_cp_fw_06;
3426
3427         fw->text = text;
3428         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3429
3430 init_cpu_err:
3431         vfree(text);
3432         return rc;
3433 }
3434
/* Transition the device between PCI power states.
 *
 * PCI_D0: restore full power, ack any pending PME event, and disable the
 * magic-packet receive mode used while suspended.
 * PCI_D3hot: optionally arm Wake-on-LAN (renegotiating a low-speed link
 * on copper first), notify the bootcode, then program the PM CSR to D3hot.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set the state field to D0 and write 1 to clear any
		 * pending PME status (the bit is write-one-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack received magic/ACPI packets and leave wake-up
		 * (magic packet) mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save link settings; while suspended on copper we
			 * advertise only 10/100 (presumably to reduce power
			 * draw), then restore the caller's settings below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort rule 0 to pass broadcast/multicast;
			 * writing 0 first, then the value, then value|ENA
			 * follows the chip's required update sequence.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (with or without WOL). */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 is the D3hot encoding of the PM CSR PowerState field.
		 * NOTE(review): on 5706 A0/A1 the state field is left at D0
		 * unless WOL is armed — presumably a chip erratum; confirm.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3571
3572 static int
3573 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3574 {
3575         u32 val;
3576         int j;
3577
3578         /* Request access to the flash interface. */
3579         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3580         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3581                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3582                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3583                         break;
3584
3585                 udelay(5);
3586         }
3587
3588         if (j >= NVRAM_TIMEOUT_COUNT)
3589                 return -EBUSY;
3590
3591         return 0;
3592 }
3593
3594 static int
3595 bnx2_release_nvram_lock(struct bnx2 *bp)
3596 {
3597         int j;
3598         u32 val;
3599
3600         /* Relinquish nvram interface. */
3601         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3602
3603         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3604                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3605                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3606                         break;
3607
3608                 udelay(5);
3609         }
3610
3611         if (j >= NVRAM_TIMEOUT_COUNT)
3612                 return -EBUSY;
3613
3614         return 0;
3615 }
3616
3617
3618 static int
3619 bnx2_enable_nvram_write(struct bnx2 *bp)
3620 {
3621         u32 val;
3622
3623         val = REG_RD(bp, BNX2_MISC_CFG);
3624         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3625
3626         if (bp->flash_info->flags & BNX2_NV_WREN) {
3627                 int j;
3628
3629                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3630                 REG_WR(bp, BNX2_NVM_COMMAND,
3631                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3632
3633                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3634                         udelay(5);
3635
3636                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3637                         if (val & BNX2_NVM_COMMAND_DONE)
3638                                 break;
3639                 }
3640
3641                 if (j >= NVRAM_TIMEOUT_COUNT)
3642                         return -EBUSY;
3643         }
3644         return 0;
3645 }
3646
3647 static void
3648 bnx2_disable_nvram_write(struct bnx2 *bp)
3649 {
3650         u32 val;
3651
3652         val = REG_RD(bp, BNX2_MISC_CFG);
3653         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3654 }
3655
3656
3657 static void
3658 bnx2_enable_nvram_access(struct bnx2 *bp)
3659 {
3660         u32 val;
3661
3662         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3663         /* Enable both bits, even on read. */
3664         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3665                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3666 }
3667
3668 static void
3669 bnx2_disable_nvram_access(struct bnx2 *bp)
3670 {
3671         u32 val;
3672
3673         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3674         /* Disable both bits, even after read. */
3675         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3676                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3677                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3678 }
3679
3680 static int
3681 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3682 {
3683         u32 cmd;
3684         int j;
3685
3686         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3687                 /* Buffered flash, no erase needed */
3688                 return 0;
3689
3690         /* Build an erase command */
3691         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3692               BNX2_NVM_COMMAND_DOIT;
3693
3694         /* Need to clear DONE bit separately. */
3695         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3696
3697         /* Address of the NVRAM to read from. */
3698         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3699
3700         /* Issue an erase command. */
3701         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3702
3703         /* Wait for completion. */
3704         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3705                 u32 val;
3706
3707                 udelay(5);
3708
3709                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3710                 if (val & BNX2_NVM_COMMAND_DONE)
3711                         break;
3712         }
3713
3714         if (j >= NVRAM_TIMEOUT_COUNT)
3715                 return -EBUSY;
3716
3717         return 0;
3718 }
3719
3720 static int
3721 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3722 {
3723         u32 cmd;
3724         int j;
3725
3726         /* Build the command word. */
3727         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3728
3729         /* Calculate an offset of a buffered flash, not needed for 5709. */
3730         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3731                 offset = ((offset / bp->flash_info->page_size) <<
3732                            bp->flash_info->page_bits) +
3733                           (offset % bp->flash_info->page_size);
3734         }
3735
3736         /* Need to clear DONE bit separately. */
3737         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3738
3739         /* Address of the NVRAM to read from. */
3740         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3741
3742         /* Issue a read command. */
3743         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3744
3745         /* Wait for completion. */
3746         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3747                 u32 val;
3748
3749                 udelay(5);
3750
3751                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3752                 if (val & BNX2_NVM_COMMAND_DONE) {
3753                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3754                         memcpy(ret_val, &v, 4);
3755                         break;
3756                 }
3757         }
3758         if (j >= NVRAM_TIMEOUT_COUNT)
3759                 return -EBUSY;
3760
3761         return 0;
3762 }
3763
3764
3765 static int
3766 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3767 {
3768         u32 cmd;
3769         __be32 val32;
3770         int j;
3771
3772         /* Build the command word. */
3773         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3774
3775         /* Calculate an offset of a buffered flash, not needed for 5709. */
3776         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3777                 offset = ((offset / bp->flash_info->page_size) <<
3778                           bp->flash_info->page_bits) +
3779                          (offset % bp->flash_info->page_size);
3780         }
3781
3782         /* Need to clear DONE bit separately. */
3783         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3784
3785         memcpy(&val32, val, 4);
3786
3787         /* Write the data. */
3788         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3789
3790         /* Address of the NVRAM to write to. */
3791         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3792
3793         /* Issue the write command. */
3794         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3795
3796         /* Wait for completion. */
3797         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3798                 udelay(5);
3799
3800                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3801                         break;
3802         }
3803         if (j >= NVRAM_TIMEOUT_COUNT)
3804                 return -EBUSY;
3805
3806         return 0;
3807 }
3808
/* Identify the attached NVRAM/flash part and record its geometry.
 *
 * On 5709 the type is fixed (flash_5709).  On earlier chips the
 * strapping bits in NVM_CFG1 are matched against flash_table[]; if the
 * interface has not yet been reconfigured, the matching entry's config
 * values are also programmed into the NVM_CFG/WRITE1 registers.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field to compare. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j carries over from whichever loop ran above; equal to
	 * entry_count means neither loop found a match.
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared hw config; fall back to the
	 * table entry's total size when the field is zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3891
3892 static int
3893 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3894                 int buf_size)
3895 {
3896         int rc = 0;
3897         u32 cmd_flags, offset32, len32, extra;
3898
3899         if (buf_size == 0)
3900                 return 0;
3901
3902         /* Request access to the flash interface. */
3903         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3904                 return rc;
3905
3906         /* Enable access to flash interface */
3907         bnx2_enable_nvram_access(bp);
3908
3909         len32 = buf_size;
3910         offset32 = offset;
3911         extra = 0;
3912
3913         cmd_flags = 0;
3914
3915         if (offset32 & 3) {
3916                 u8 buf[4];
3917                 u32 pre_len;
3918
3919                 offset32 &= ~3;
3920                 pre_len = 4 - (offset & 3);
3921
3922                 if (pre_len >= len32) {
3923                         pre_len = len32;
3924                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3925                                     BNX2_NVM_COMMAND_LAST;
3926                 }
3927                 else {
3928                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3929                 }
3930
3931                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3932
3933                 if (rc)
3934                         return rc;
3935
3936                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3937
3938                 offset32 += 4;
3939                 ret_buf += pre_len;
3940                 len32 -= pre_len;
3941         }
3942         if (len32 & 3) {
3943                 extra = 4 - (len32 & 3);
3944                 len32 = (len32 + 4) & ~3;
3945         }
3946
3947         if (len32 == 4) {
3948                 u8 buf[4];
3949
3950                 if (cmd_flags)
3951                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3952                 else
3953                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3954                                     BNX2_NVM_COMMAND_LAST;
3955
3956                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3957
3958                 memcpy(ret_buf, buf, 4 - extra);
3959         }
3960         else if (len32 > 0) {
3961                 u8 buf[4];
3962
3963                 /* Read the first word. */
3964                 if (cmd_flags)
3965                         cmd_flags = 0;
3966                 else
3967                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3968
3969                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3970
3971                 /* Advance to the next dword. */
3972                 offset32 += 4;
3973                 ret_buf += 4;
3974                 len32 -= 4;
3975
3976                 while (len32 > 4 && rc == 0) {
3977                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3978
3979                         /* Advance to the next dword. */
3980                         offset32 += 4;
3981                         ret_buf += 4;
3982                         len32 -= 4;
3983                 }
3984
3985                 if (rc)
3986                         return rc;
3987
3988                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3989                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3990
3991                 memcpy(ret_buf, buf, 4 - extra);
3992         }
3993
3994         /* Disable access to flash interface */
3995         bnx2_disable_nvram_access(bp);
3996
3997         bnx2_release_nvram_lock(bp);
3998
3999         return rc;
4000 }
4001
4002 static int
4003 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4004                 int buf_size)
4005 {
4006         u32 written, offset32, len32;
4007         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4008         int rc = 0;
4009         int align_start, align_end;
4010
4011         buf = data_buf;
4012         offset32 = offset;
4013         len32 = buf_size;
4014         align_start = align_end = 0;
4015
4016         if ((align_start = (offset32 & 3))) {
4017                 offset32 &= ~3;
4018                 len32 += align_start;
4019                 if (len32 < 4)
4020                         len32 = 4;
4021                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4022                         return rc;
4023         }
4024
4025         if (len32 & 3) {
4026                 align_end = 4 - (len32 & 3);
4027                 len32 += align_end;
4028                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4029                         return rc;
4030         }
4031
4032         if (align_start || align_end) {
4033                 align_buf = kmalloc(len32, GFP_KERNEL);
4034                 if (align_buf == NULL)
4035                         return -ENOMEM;
4036                 if (align_start) {
4037                         memcpy(align_buf, start, 4);
4038                 }
4039                 if (align_end) {
4040                         memcpy(align_buf + len32 - 4, end, 4);
4041                 }
4042                 memcpy(align_buf + align_start, data_buf, buf_size);
4043                 buf = align_buf;
4044         }
4045
4046         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4047                 flash_buffer = kmalloc(264, GFP_KERNEL);
4048                 if (flash_buffer == NULL) {
4049                         rc = -ENOMEM;
4050                         goto nvram_write_end;
4051                 }
4052         }
4053
4054         written = 0;
4055         while ((written < len32) && (rc == 0)) {
4056                 u32 page_start, page_end, data_start, data_end;
4057                 u32 addr, cmd_flags;
4058                 int i;
4059
4060                 /* Find the page_start addr */
4061                 page_start = offset32 + written;
4062                 page_start -= (page_start % bp->flash_info->page_size);
4063                 /* Find the page_end addr */
4064                 page_end = page_start + bp->flash_info->page_size;
4065                 /* Find the data_start addr */
4066                 data_start = (written == 0) ? offset32 : page_start;
4067                 /* Find the data_end addr */
4068                 data_end = (page_end > offset32 + len32) ?
4069                         (offset32 + len32) : page_end;
4070
4071                 /* Request access to the flash interface. */
4072                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4073                         goto nvram_write_end;
4074
4075                 /* Enable access to flash interface */
4076                 bnx2_enable_nvram_access(bp);
4077
4078                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4079                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4080                         int j;
4081
4082                         /* Read the whole page into the buffer
4083                          * (non-buffer flash only) */
4084                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4085                                 if (j == (bp->flash_info->page_size - 4)) {
4086                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4087                                 }
4088                                 rc = bnx2_nvram_read_dword(bp,
4089                                         page_start + j,
4090                                         &flash_buffer[j],
4091                                         cmd_flags);
4092
4093                                 if (rc)
4094                                         goto nvram_write_end;
4095
4096                                 cmd_flags = 0;
4097                         }
4098                 }
4099
4100                 /* Enable writes to flash interface (unlock write-protect) */
4101                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4102                         goto nvram_write_end;
4103
4104                 /* Loop to write back the buffer data from page_start to
4105                  * data_start */
4106                 i = 0;
4107                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4108                         /* Erase the page */
4109                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4110                                 goto nvram_write_end;
4111
4112                         /* Re-enable the write again for the actual write */
4113                         bnx2_enable_nvram_write(bp);
4114
4115                         for (addr = page_start; addr < data_start;
4116                                 addr += 4, i += 4) {
4117
4118                                 rc = bnx2_nvram_write_dword(bp, addr,
4119                                         &flash_buffer[i], cmd_flags);
4120
4121                                 if (rc != 0)
4122                                         goto nvram_write_end;
4123
4124                                 cmd_flags = 0;
4125                         }
4126                 }
4127
4128                 /* Loop to write the new data from data_start to data_end */
4129                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4130                         if ((addr == page_end - 4) ||
4131                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4132                                  (addr == data_end - 4))) {
4133
4134                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4135                         }
4136                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4137                                 cmd_flags);
4138
4139                         if (rc != 0)
4140                                 goto nvram_write_end;
4141
4142                         cmd_flags = 0;
4143                         buf += 4;
4144                 }
4145
4146                 /* Loop to write back the buffer data from data_end
4147                  * to page_end */
4148                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4149                         for (addr = data_end; addr < page_end;
4150                                 addr += 4, i += 4) {
4151
4152                                 if (addr == page_end-4) {
4153                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4154                                 }
4155                                 rc = bnx2_nvram_write_dword(bp, addr,
4156                                         &flash_buffer[i], cmd_flags);
4157
4158                                 if (rc != 0)
4159                                         goto nvram_write_end;
4160
4161                                 cmd_flags = 0;
4162                         }
4163                 }
4164
4165                 /* Disable writes to flash interface (lock write-protect) */
4166                 bnx2_disable_nvram_write(bp);
4167
4168                 /* Disable access to flash interface */
4169                 bnx2_disable_nvram_access(bp);
4170                 bnx2_release_nvram_lock(bp);
4171
4172                 /* Increment written */
4173                 written += data_end - data_start;
4174         }
4175
4176 nvram_write_end:
4177         kfree(flash_buffer);
4178         kfree(align_buf);
4179         return rc;
4180 }
4181
4182 static void
4183 bnx2_init_remote_phy(struct bnx2 *bp)
4184 {
4185         u32 val;
4186
4187         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4188         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4189                 return;
4190
4191         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4192         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4193                 return;
4194
4195         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4196                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4197
4198                 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4199                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4200                         bp->phy_port = PORT_FIBRE;
4201                 else
4202                         bp->phy_port = PORT_TP;
4203
4204                 if (netif_running(bp->dev)) {
4205                         u32 sig;
4206
4207                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4208                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4209                         bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4210                 }
4211         }
4212 }
4213
/* Map the MSI-X vector table and pending-bit array (PBA) through
 * separate PCI GRC windows.  Called from bnx2_reset_chip() after a
 * reset when MSI-X is in use.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4222
/* Soft-reset the chip with the firmware's cooperation, then re-apply
 * the per-stepping workarounds that the reset clears.  Returns 0 on
 * success or a negative errno if the reset or firmware handshake fails.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709: reset via the MISC command register, then
                 * restore the config-space window settings. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        printk(KERN_ERR PFX "Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
        if (rc)
                return rc;

        /* Re-probe remote-PHY capability; the reported port type may
         * change across a reset. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_remote_phy(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        /* Re-program the MSI-X table/PBA mapping windows after reset. */
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                bnx2_setup_msix_tbl(bp);

        return rc;
}
4328
/* Bring the chip from post-reset state to operational: program DMA
 * swapping/channels, context memory, internal CPUs, MAC address, MTU,
 * and host-coalescing parameters, then complete the firmware reset
 * handshake.  Returns 0 or a negative errno from context/CPU init or
 * the firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc, i;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* DMA byte/word swapping plus read/write channel counts. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 workaround: restrict TDMA to a single DMA. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, clear the relaxed-ordering enable bit. */
        if (bp->flags & BNX2_FLAG_PCIX) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        /* Initialize the on-chip RISC processors. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        /* Kernel-bypass block size; early 5709 steppings also need the
         * MQ halt workaround. */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* Tell RV2P the host page size. */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the TX backoff generator from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        /* Forget any stale status-block index on every vector. */
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;

        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host-coalescing thresholds/timers: low 16 bits apply
         * normally, high 16 bits while an interrupt is pending. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        /* Separate status-block configuration for the TX MSI-X vector. */
        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);

                REG_WR(bp, base,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
                        (bp->tx_quick_cons_trip_int << 16) |
                         bp->tx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);

                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
        }

        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
                val |= BNX2_HC_CONFIG_ONE_SHOT;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        /* 5709: set the DMA-enable bit in the new core control block. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        /* Cache the HC command register value for later use. */
        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4529
4530 static void
4531 bnx2_clear_ring_states(struct bnx2 *bp)
4532 {
4533         struct bnx2_napi *bnapi;
4534         struct bnx2_tx_ring_info *txr;
4535         int i;
4536
4537         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4538                 bnapi = &bp->bnx2_napi[i];
4539                 txr = &bnapi->tx_ring;
4540
4541                 txr->tx_cons = 0;
4542                 txr->hw_tx_cons = 0;
4543                 bnapi->rx_prod_bseq = 0;
4544                 bnapi->rx_prod = 0;
4545                 bnapi->rx_cons = 0;
4546                 bnapi->rx_pg_prod = 0;
4547                 bnapi->rx_pg_cons = 0;
4548         }
4549 }
4550
4551 static void
4552 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4553 {
4554         u32 val, offset0, offset1, offset2, offset3;
4555         u32 cid_addr = GET_CID_ADDR(cid);
4556
4557         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4558                 offset0 = BNX2_L2CTX_TYPE_XI;
4559                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4560                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4561                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4562         } else {
4563                 offset0 = BNX2_L2CTX_TYPE;
4564                 offset1 = BNX2_L2CTX_CMD_TYPE;
4565                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4566                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4567         }
4568         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4569         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4570
4571         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4572         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4573
4574         val = (u64) txr->tx_desc_mapping >> 32;
4575         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4576
4577         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4578         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4579 }
4580
4581 static void
4582 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4583 {
4584         struct tx_bd *txbd;
4585         u32 cid = TX_CID;
4586         struct bnx2_napi *bnapi;
4587         struct bnx2_tx_ring_info *txr;
4588
4589         bnapi = &bp->bnx2_napi[ring_num];
4590         txr = &bnapi->tx_ring;
4591
4592         if (ring_num == 0)
4593                 cid = TX_CID;
4594         else
4595                 cid = TX_TSS_CID + ring_num - 1;
4596
4597         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4598
4599         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4600
4601         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4602         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4603
4604         txr->tx_prod = 0;
4605         txr->tx_prod_bseq = 0;
4606
4607         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4608         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4609
4610         bnx2_init_tx_context(bp, cid, txr);
4611 }
4612
4613 static void
4614 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4615                      int num_rings)
4616 {
4617         int i;
4618         struct rx_bd *rxbd;
4619
4620         for (i = 0; i < num_rings; i++) {
4621                 int j;
4622
4623                 rxbd = &rx_ring[i][0];
4624                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4625                         rxbd->rx_bd_len = buf_size;
4626                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4627                 }
4628                 if (i == (num_rings - 1))
4629                         j = 0;
4630                 else
4631                         j = i + 1;
4632                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4633                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4634         }
4635 }
4636
/* Program the RX context, chain the RX (and optional page) BD rings,
 * pre-fill them with buffers, and publish the producer indices to the
 * chip.  RX uses bnx2_napi[0] only.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        int i;
        u16 prod, ring_prod;
        u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context0(bp);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
                REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Zero page-buffer size first; overwritten below when a jumbo
         * page ring is configured. */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
                                     bp->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY);

                /* DMA base of the first page-ring segment. */
                val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* DMA base of the first RX-ring segment. */
        val = (u64) bp->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early on allocation failure. */
        ring_prod = prod = bnapi->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        bnapi->rx_pg_prod = prod;

        /* Pre-fill the RX ring with skbs; stop early on failure. */
        ring_prod = prod = bnapi->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bnapi->rx_prod = prod;

        /* Publish producer indices and byte sequence to the chip. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
                 bnapi->rx_pg_prod);
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4706
4707 static void
4708 bnx2_init_all_rings(struct bnx2 *bp)
4709 {
4710         int i;
4711
4712         bnx2_clear_ring_states(bp);
4713
4714         REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4715         for (i = 0; i < bp->num_tx_rings; i++)
4716                 bnx2_init_tx_ring(bp, i);
4717
4718         if (bp->num_tx_rings > 1)
4719                 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4720                        (TX_TSS_CID << 7));
4721
4722         bnx2_init_rx_ring(bp);
4723 }
4724
4725 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4726 {
4727         u32 max, num_rings = 1;
4728
4729         while (ring_size > MAX_RX_DESC_CNT) {
4730                 ring_size -= MAX_RX_DESC_CNT;
4731                 num_rings++;
4732         }
4733         /* round to next power of 2 */
4734         max = max_size;
4735         while ((max & num_rings) == 0)
4736                 max >>= 1;
4737
4738         if (num_rings != max)
4739                 max <<= 1;
4740
4741         return max;
4742 }
4743
/* Derive all RX buffer and ring sizing from the requested ring size
 * and the current MTU.  When the required skb would exceed one page
 * (and jumbo pages are not broken on this chip), a separate page ring
 * is sized for the frame data and the main RX buffers shrink to just
 * the copy-threshold header portion.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
        u32 rx_size, rx_space, jumbo_size;

        /* 8 for CRC and VLAN */
        rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

        /* Full skb footprint including alignment, headroom and the
         * shared info struct. */
        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
                sizeof(struct skb_shared_info);

        bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
        bp->rx_max_pg_ring = 0;
        bp->rx_max_pg_ring_idx = 0;
        if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
                int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

                /* One group of pages per RX BD, capped at the total
                 * page-ring capacity. */
                jumbo_size = size * pages;
                if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
                        jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

                bp->rx_pg_ring_size = jumbo_size;
                bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
                                                        MAX_RX_PG_RINGS);
                bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
                /* Main buffers only hold the copied header portion. */
                rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
                bp->rx_copy_thresh = 0;
        }

        bp->rx_buf_use_size = rx_size;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
        bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
        bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4782
4783 static void
4784 bnx2_free_tx_skbs(struct bnx2 *bp)
4785 {
4786         int i;
4787
4788         for (i = 0; i < bp->num_tx_rings; i++) {
4789                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4790                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4791                 int j;
4792
4793                 if (txr->tx_buf_ring == NULL)
4794                         continue;
4795
4796                 for (j = 0; j < TX_DESC_CNT; ) {
4797                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4798                         struct sk_buff *skb = tx_buf->skb;
4799                         int k, last;
4800
4801                         if (skb == NULL) {
4802                                 j++;
4803                                 continue;
4804                         }
4805
4806                         pci_unmap_single(bp->pdev,
4807                                          pci_unmap_addr(tx_buf, mapping),
4808                         skb_headlen(skb), PCI_DMA_TODEVICE);
4809
4810                         tx_buf->skb = NULL;
4811
4812                         last = skb_shinfo(skb)->nr_frags;
4813                         for (k = 0; k < last; k++) {
4814                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4815                                 pci_unmap_page(bp->pdev,
4816                                         pci_unmap_addr(tx_buf, mapping),
4817                                         skb_shinfo(skb)->frags[j].size,
4818                                         PCI_DMA_TODEVICE);
4819                         }
4820                         dev_kfree_skb(skb);
4821                         j += k + 1;
4822                 }
4823         }
4824 }
4825
4826 static void
4827 bnx2_free_rx_skbs(struct bnx2 *bp)
4828 {
4829         int i;
4830
4831         if (bp->rx_buf_ring == NULL)
4832                 return;
4833
4834         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4835                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4836                 struct sk_buff *skb = rx_buf->skb;
4837
4838                 if (skb == NULL)
4839                         continue;
4840
4841                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4842                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4843
4844                 rx_buf->skb = NULL;
4845
4846                 dev_kfree_skb(skb);
4847         }
4848         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4849                 bnx2_free_rx_page(bp, i);
4850 }
4851
/* Release every TX and RX buffer still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4858
4859 static int
4860 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4861 {
4862         int rc;
4863
4864         rc = bnx2_reset_chip(bp, reset_code);
4865         bnx2_free_skbs(bp);
4866         if (rc)
4867                 return rc;
4868
4869         if ((rc = bnx2_init_chip(bp)) != 0)
4870                 return rc;
4871
4872         bnx2_init_all_rings(bp);
4873         return 0;
4874 }
4875
4876 static int
4877 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4878 {
4879         int rc;
4880
4881         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4882                 return rc;
4883
4884         spin_lock_bh(&bp->phy_lock);
4885         bnx2_init_phy(bp, reset_phy);
4886         bnx2_set_link(bp);
4887         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4888                 bnx2_remote_phy_event(bp);
4889         spin_unlock_bh(&bp->phy_lock);
4890         return 0;
4891 }
4892
4893 static int
4894 bnx2_test_registers(struct bnx2 *bp)
4895 {
4896         int ret;
4897         int i, is_5709;
4898         static const struct {
4899                 u16   offset;
4900                 u16   flags;
4901 #define BNX2_FL_NOT_5709        1
4902                 u32   rw_mask;
4903                 u32   ro_mask;
4904         } reg_tbl[] = {
4905                 { 0x006c, 0, 0x00000000, 0x0000003f },
4906                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4907                 { 0x0094, 0, 0x00000000, 0x00000000 },
4908
4909                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4910                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4911                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4912                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4913                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4914                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4915                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4916                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4917                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4918
4919                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4920                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4921                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4922                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4923                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4924                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4925
4926                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4927                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4928                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4929
4930                 { 0x1000, 0, 0x00000000, 0x00000001 },
4931                 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
4932
4933                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4934                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4935                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4936                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4937                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4938                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4939                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4940                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4941                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4942                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4943
4944                 { 0x1800, 0, 0x00000000, 0x00000001 },
4945                 { 0x1804, 0, 0x00000000, 0x00000003 },
4946
4947                 { 0x2800, 0, 0x00000000, 0x00000001 },
4948                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4949                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4950                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4951                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4952                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4953                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4954                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4955                 { 0x2840, 0, 0x00000000, 0xffffffff },
4956                 { 0x2844, 0, 0x00000000, 0xffffffff },
4957                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4958                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4959
4960                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4961                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4962
4963                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4964                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4965                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4966                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4967                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4968                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4969                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4970                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4971                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4972
4973                 { 0x5004, 0, 0x00000000, 0x0000007f },
4974                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4975
4976                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4977                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4978                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4979                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4980                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4981                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4982                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4983                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4984                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4985
4986                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4987                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4988                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4989                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4990                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4991                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4992                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4993                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4994                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4995                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4996                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4997                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4998                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4999                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5000                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5001                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5002                 { 0x684c, 0, 0xffffffff, 0x00000000 },
5003                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5004                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5005                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5006                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5007                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5008                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5009
5010                 { 0xffff, 0, 0x00000000, 0x00000000 },
5011         };
5012
5013         ret = 0;
5014         is_5709 = 0;
5015         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5016                 is_5709 = 1;
5017
5018         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5019                 u32 offset, rw_mask, ro_mask, save_val, val;
5020                 u16 flags = reg_tbl[i].flags;
5021
5022                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5023                         continue;
5024
5025                 offset = (u32) reg_tbl[i].offset;
5026                 rw_mask = reg_tbl[i].rw_mask;
5027                 ro_mask = reg_tbl[i].ro_mask;
5028
5029                 save_val = readl(bp->regview + offset);
5030
5031                 writel(0, bp->regview + offset);
5032
5033                 val = readl(bp->regview + offset);
5034                 if ((val & rw_mask) != 0) {
5035                         goto reg_test_err;
5036                 }
5037
5038                 if ((val & ro_mask) != (save_val & ro_mask)) {
5039                         goto reg_test_err;
5040                 }
5041
5042                 writel(0xffffffff, bp->regview + offset);
5043
5044                 val = readl(bp->regview + offset);
5045                 if ((val & rw_mask) != rw_mask) {
5046                         goto reg_test_err;
5047                 }
5048
5049                 if ((val & ro_mask) != (save_val & ro_mask)) {
5050                         goto reg_test_err;
5051                 }
5052
5053                 writel(save_val, bp->regview + offset);
5054                 continue;
5055
5056 reg_test_err:
5057                 writel(save_val, bp->regview + offset);
5058                 ret = -ENODEV;
5059                 break;
5060         }
5061         return ret;
5062 }
5063
5064 static int
5065 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5066 {
5067         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5068                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5069         int i;
5070
5071         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5072                 u32 offset;
5073
5074                 for (offset = 0; offset < size; offset += 4) {
5075
5076                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5077
5078                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5079                                 test_pattern[i]) {
5080                                 return -ENODEV;
5081                         }
5082                 }
5083         }
5084         return 0;
5085 }
5086
5087 static int
5088 bnx2_test_memory(struct bnx2 *bp)
5089 {
5090         int ret = 0;
5091         int i;
5092         static struct mem_entry {
5093                 u32   offset;
5094                 u32   len;
5095         } mem_tbl_5706[] = {
5096                 { 0x60000,  0x4000 },
5097                 { 0xa0000,  0x3000 },
5098                 { 0xe0000,  0x4000 },
5099                 { 0x120000, 0x4000 },
5100                 { 0x1a0000, 0x4000 },
5101                 { 0x160000, 0x4000 },
5102                 { 0xffffffff, 0    },
5103         },
5104         mem_tbl_5709[] = {
5105                 { 0x60000,  0x4000 },
5106                 { 0xa0000,  0x3000 },
5107                 { 0xe0000,  0x4000 },
5108                 { 0x120000, 0x4000 },
5109                 { 0x1a0000, 0x4000 },
5110                 { 0xffffffff, 0    },
5111         };
5112         struct mem_entry *mem_tbl;
5113
5114         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5115                 mem_tbl = mem_tbl_5709;
5116         else
5117                 mem_tbl = mem_tbl_5706;
5118
5119         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5120                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5121                         mem_tbl[i].len)) != 0) {
5122                         return ret;
5123                 }
5124         }
5125
5126         return ret;
5127 }
5128
5129 #define BNX2_MAC_LOOPBACK       0
5130 #define BNX2_PHY_LOOPBACK       1
5131
/* Self-test: transmit one frame with the MAC or PHY looped back and
 * verify it is received intact.  Returns 0 on success, -EINVAL for an
 * unknown mode, -ENOMEM if no skb could be allocated, -ENODEV on any
 * tx/rx mismatch.  PHY loopback on a remote-managed PHY returns 0
 * (skipped).  Caller must have the NIC initialized and quiesced.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remote-managed PHY cannot be put in local loopback;
		 * report success so the test is skipped, not failed. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our MAC as destination, zeroed source,
	 * payload byte at offset i == (i & 0xff). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx buffer descriptor for the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so completed tx/rx indices become visible. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have completed transmission... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2 frame header precedes the frame data in the rx buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as errored. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5257
5258 #define BNX2_MAC_LOOPBACK_FAILED        1
5259 #define BNX2_PHY_LOOPBACK_FAILED        2
5260 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5261                                          BNX2_PHY_LOOPBACK_FAILED)
5262
5263 static int
5264 bnx2_test_loopback(struct bnx2 *bp)
5265 {
5266         int rc = 0;
5267
5268         if (!netif_running(bp->dev))
5269                 return BNX2_LOOPBACK_FAILED;
5270
5271         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5272         spin_lock_bh(&bp->phy_lock);
5273         bnx2_init_phy(bp, 1);
5274         spin_unlock_bh(&bp->phy_lock);
5275         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5276                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5277         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5278                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5279         return rc;
5280 }
5281
5282 #define NVRAM_SIZE 0x200
5283 #define CRC32_RESIDUAL 0xdebb20e3
5284
5285 static int
5286 bnx2_test_nvram(struct bnx2 *bp)
5287 {
5288         __be32 buf[NVRAM_SIZE / 4];
5289         u8 *data = (u8 *) buf;
5290         int rc = 0;
5291         u32 magic, csum;
5292
5293         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5294                 goto test_nvram_done;
5295
5296         magic = be32_to_cpu(buf[0]);
5297         if (magic != 0x669955aa) {
5298                 rc = -ENODEV;
5299                 goto test_nvram_done;
5300         }
5301
5302         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5303                 goto test_nvram_done;
5304
5305         csum = ether_crc_le(0x100, data);
5306         if (csum != CRC32_RESIDUAL) {
5307                 rc = -ENODEV;
5308                 goto test_nvram_done;
5309         }
5310
5311         csum = ether_crc_le(0x100, data + 0x100);
5312         if (csum != CRC32_RESIDUAL) {
5313                 rc = -ENODEV;
5314         }
5315
5316 test_nvram_done:
5317         return rc;
5318 }
5319
5320 static int
5321 bnx2_test_link(struct bnx2 *bp)
5322 {
5323         u32 bmsr;
5324
5325         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5326                 if (bp->link_up)
5327                         return 0;
5328                 return -ENODEV;
5329         }
5330         spin_lock_bh(&bp->phy_lock);
5331         bnx2_enable_bmsr1(bp);
5332         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5333         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5334         bnx2_disable_bmsr1(bp);
5335         spin_unlock_bh(&bp->phy_lock);
5336
5337         if (bmsr & BMSR_LSTATUS) {
5338                 return 0;
5339         }
5340         return -ENODEV;
5341 }
5342
5343 static int
5344 bnx2_test_intr(struct bnx2 *bp)
5345 {
5346         int i;
5347         u16 status_idx;
5348
5349         if (!netif_running(bp->dev))
5350                 return -ENODEV;
5351
5352         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5353
5354         /* This register is not touched during run-time. */
5355         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5356         REG_RD(bp, BNX2_HC_COMMAND);
5357
5358         for (i = 0; i < 10; i++) {
5359                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5360                         status_idx) {
5361
5362                         break;
5363                 }
5364
5365                 msleep_interruptible(10);
5366         }
5367         if (i < 10)
5368                 return 0;
5369
5370         return -ENODEV;
5371 }
5372
/* Determining link for parallel detection: returns 1 if the 5706 serdes
 * sees a valid signal from a forced-speed (non-autoneg) link partner,
 * 0 otherwise.  Called with phy_lock held (via the serdes timer).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register and check signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* The AN debug register latches events; read twice so the second
	 * read reflects the current state. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read protocol for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5404
/* Periodic serdes maintenance for the 5706: implements parallel
 * detection (force 1G/full when the partner does not autoneg, revert
 * to autoneg when it starts to) and recovers from a lost-sync link.
 * Called from bnx2_timer(); takes phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently (re)started; give it more time. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* No autoneg link: if a forced-speed partner is
			 * detected, force 1G full duplex ourselves. */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * advertises autoneg, re-enable it on our side too. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* AN debug register latches; read twice for live state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link claims up but rx lost sync: force it down
			 * once, then let bnx2_set_link() re-evaluate. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5466
/* Periodic serdes maintenance for the 5708: while link is down on a
 * 2.5G-capable interface, alternate between forced 2.5G and autoneg.
 * Called from bnx2_timer(); takes phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do when the PHY is remotely managed. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg was recently restarted; give it more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found no link; try forced 2.5G next,
			 * with a shorter polling interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode found no link; return to autoneg
			 * and hold off for two timer ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5499
5500 static void
5501 bnx2_timer(unsigned long data)
5502 {
5503         struct bnx2 *bp = (struct bnx2 *) data;
5504
5505         if (!netif_running(bp->dev))
5506                 return;
5507
5508         if (atomic_read(&bp->intr_sem) != 0)
5509                 goto bnx2_restart_timer;
5510
5511         bnx2_send_heart_beat(bp);
5512
5513         bp->stats_blk->stat_FwRxDrop =
5514                 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5515
5516         /* workaround occasional corrupted counters */
5517         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5518                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5519                                             BNX2_HC_COMMAND_STATS_NOW);
5520
5521         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5522                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5523                         bnx2_5706_serdes_timer(bp);
5524                 else
5525                         bnx2_5708_serdes_timer(bp);
5526         }
5527
5528 bnx2_restart_timer:
5529         mod_timer(&bp->timer, jiffies + bp->current_interval);
5530 }
5531
5532 static int
5533 bnx2_request_irq(struct bnx2 *bp)
5534 {
5535         struct net_device *dev = bp->dev;
5536         unsigned long flags;
5537         struct bnx2_irq *irq;
5538         int rc = 0, i;
5539
5540         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5541                 flags = 0;
5542         else
5543                 flags = IRQF_SHARED;
5544
5545         for (i = 0; i < bp->irq_nvecs; i++) {
5546                 irq = &bp->irq_tbl[i];
5547                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5548                                  dev);
5549                 if (rc)
5550                         break;
5551                 irq->requested = 1;
5552         }
5553         return rc;
5554 }
5555
5556 static void
5557 bnx2_free_irq(struct bnx2 *bp)
5558 {
5559         struct net_device *dev = bp->dev;
5560         struct bnx2_irq *irq;
5561         int i;
5562
5563         for (i = 0; i < bp->irq_nvecs; i++) {
5564                 irq = &bp->irq_tbl[i];
5565                 if (irq->requested)
5566                         free_irq(irq->vector, dev);
5567                 irq->requested = 0;
5568         }
5569         if (bp->flags & BNX2_FLAG_USING_MSI)
5570                 pci_disable_msi(bp->pdev);
5571         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5572                 pci_disable_msix(bp->pdev);
5573
5574         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5575 }
5576
/* Try to switch the device to MSI-X: program the MSI-X table windows,
 * fill the per-vector handler table and call pci_enable_msix().  On
 * failure the device silently stays in its previous interrupt mode.
 */
static void
bnx2_enable_msix(struct bnx2 *bp)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

	bnx2_setup_msix_tbl(bp);
	/* Map the MSI-X table and PBA through GRC windows 2 and 3. */
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		strcpy(bp->irq_tbl[i].name, bp->dev->name);
		/* Vector 0 uses the one-shot handler; the remaining
		 * vectors are dedicated tx handlers. */
		if (i == 0)
			bp->irq_tbl[i].handler = bnx2_msi_1shot;
		else
			bp->irq_tbl[i].handler = bnx2_tx_msix;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5608
5609 static void
5610 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5611 {
5612         bp->irq_tbl[0].handler = bnx2_interrupt;
5613         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5614         bp->irq_nvecs = 1;
5615         bp->irq_tbl[0].vector = bp->pdev->irq;
5616
5617         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5618                 bnx2_enable_msix(bp);
5619
5620         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5621             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5622                 if (pci_enable_msi(bp->pdev) == 0) {
5623                         bp->flags |= BNX2_FLAG_USING_MSI;
5624                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5625                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5626                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5627                         } else
5628                                 bp->irq_tbl[0].handler = bnx2_msi;
5629
5630                         bp->irq_tbl[0].vector = bp->pdev->irq;
5631                 }
5632         }
5633         bp->num_tx_rings = 1;
5634 }
5635
5636 /* Called with rtnl_lock */
5637 static int
5638 bnx2_open(struct net_device *dev)
5639 {
5640         struct bnx2 *bp = netdev_priv(dev);
5641         int rc;
5642
5643         netif_carrier_off(dev);
5644
5645         bnx2_set_power_state(bp, PCI_D0);
5646         bnx2_disable_int(bp);
5647
5648         bnx2_setup_int_mode(bp, disable_msi);
5649         bnx2_napi_enable(bp);
5650         rc = bnx2_alloc_mem(bp);
5651         if (rc) {
5652                 bnx2_napi_disable(bp);
5653                 bnx2_free_mem(bp);
5654                 return rc;
5655         }
5656
5657         rc = bnx2_request_irq(bp);
5658
5659         if (rc) {
5660                 bnx2_napi_disable(bp);
5661                 bnx2_free_mem(bp);
5662                 return rc;
5663         }
5664
5665         rc = bnx2_init_nic(bp, 1);
5666
5667         if (rc) {
5668                 bnx2_napi_disable(bp);
5669                 bnx2_free_irq(bp);
5670                 bnx2_free_skbs(bp);
5671                 bnx2_free_mem(bp);
5672                 return rc;
5673         }
5674
5675         mod_timer(&bp->timer, jiffies + bp->current_interval);
5676
5677         atomic_set(&bp->intr_sem, 0);
5678
5679         bnx2_enable_int(bp);
5680
5681         if (bp->flags & BNX2_FLAG_USING_MSI) {
5682                 /* Test MSI to make sure it is working
5683                  * If MSI test fails, go back to INTx mode
5684                  */
5685                 if (bnx2_test_intr(bp) != 0) {
5686                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
5687                                " using MSI, switching to INTx mode. Please"
5688                                " report this failure to the PCI maintainer"
5689                                " and include system chipset information.\n",
5690                                bp->dev->name);
5691
5692                         bnx2_disable_int(bp);
5693                         bnx2_free_irq(bp);
5694
5695                         bnx2_setup_int_mode(bp, 1);
5696
5697                         rc = bnx2_init_nic(bp, 0);
5698
5699                         if (!rc)
5700                                 rc = bnx2_request_irq(bp);
5701
5702                         if (rc) {
5703                                 bnx2_napi_disable(bp);
5704                                 bnx2_free_skbs(bp);
5705                                 bnx2_free_mem(bp);
5706                                 del_timer_sync(&bp->timer);
5707                                 return rc;
5708                         }
5709                         bnx2_enable_int(bp);
5710                 }
5711         }
5712         if (bp->flags & BNX2_FLAG_USING_MSI)
5713                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5714         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5715                 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5716
5717         netif_start_queue(dev);
5718
5719         return 0;
5720 }
5721
/* Work-queue handler for deferred chip resets (scheduled e.g. from
 * bnx2_tx_timeout()).  Stops traffic, re-initializes the NIC, raises
 * intr_sem and restarts the interface.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* intr_sem is raised here; periodic work (see bnx2_timer) is
	 * skipped while it is nonzero.  NOTE(review): presumably
	 * bnx2_netif_start() drops it when re-enabling — confirm. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5737
5738 static void
5739 bnx2_tx_timeout(struct net_device *dev)
5740 {
5741         struct bnx2 *bp = netdev_priv(dev);
5742
5743         /* This allows the netif to be shutdown gracefully before resetting */
5744         schedule_work(&bp->reset_task);
5745 }
5746
5747 #ifdef BCM_VLAN
5748 /* Called with rtnl_lock */
5749 static void
5750 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5751 {
5752         struct bnx2 *bp = netdev_priv(dev);
5753
5754         bnx2_netif_stop(bp);
5755
5756         bp->vlgrp = vlgrp;
5757         bnx2_set_rx_mode(dev);
5758
5759         bnx2_netif_start(bp);
5760 }
5761 #endif
5762
5763 /* Called with netif_tx_lock.
5764  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5765  * netif_wake_queue().
5766  */
5767 static int
5768 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5769 {
5770         struct bnx2 *bp = netdev_priv(dev);
5771         dma_addr_t mapping;
5772         struct tx_bd *txbd;
5773         struct sw_bd *tx_buf;
5774         u32 len, vlan_tag_flags, last_frag, mss;
5775         u16 prod, ring_prod;
5776         int i;
5777         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
5778         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5779
5780         if (unlikely(bnx2_tx_avail(bp, txr) <
5781             (skb_shinfo(skb)->nr_frags + 1))) {
5782                 netif_stop_queue(dev);
5783                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5784                         dev->name);
5785
5786                 return NETDEV_TX_BUSY;
5787         }
5788         len = skb_headlen(skb);
5789         prod = txr->tx_prod;
5790         ring_prod = TX_RING_IDX(prod);
5791
5792         vlan_tag_flags = 0;
5793         if (skb->ip_summed == CHECKSUM_PARTIAL) {
5794                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5795         }
5796
5797         if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5798                 vlan_tag_flags |=
5799                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5800         }
5801         if ((mss = skb_shinfo(skb)->gso_size)) {
5802                 u32 tcp_opt_len, ip_tcp_len;
5803                 struct iphdr *iph;
5804
5805                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5806
5807                 tcp_opt_len = tcp_optlen(skb);
5808
5809                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5810                         u32 tcp_off = skb_transport_offset(skb) -
5811                                       sizeof(struct ipv6hdr) - ETH_HLEN;
5812
5813                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5814                                           TX_BD_FLAGS_SW_FLAGS;
5815                         if (likely(tcp_off == 0))
5816                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5817                         else {
5818                                 tcp_off >>= 3;
5819                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
5820                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
5821                                                   ((tcp_off & 0x10) <<
5822                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
5823                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5824                         }
5825                 } else {
5826                         if (skb_header_cloned(skb) &&
5827                             pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5828                                 dev_kfree_skb(skb);
5829                                 return NETDEV_TX_OK;
5830                         }
5831
5832                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5833
5834                         iph = ip_hdr(skb);
5835                         iph->check = 0;
5836                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5837                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5838                                                                  iph->daddr, 0,
5839                                                                  IPPROTO_TCP,
5840                                                                  0);
5841                         if (tcp_opt_len || (iph->ihl > 5)) {
5842                                 vlan_tag_flags |= ((iph->ihl - 5) +
5843                                                    (tcp_opt_len >> 2)) << 8;
5844                         }
5845                 }
5846         } else
5847                 mss = 0;
5848
5849         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5850
5851         tx_buf = &txr->tx_buf_ring[ring_prod];
5852         tx_buf->skb = skb;
5853         pci_unmap_addr_set(tx_buf, mapping, mapping);
5854
5855         txbd = &txr->tx_desc_ring[ring_prod];
5856
5857         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5858         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5859         txbd->tx_bd_mss_nbytes = len | (mss << 16);
5860         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5861
5862         last_frag = skb_shinfo(skb)->nr_frags;
5863
5864         for (i = 0; i < last_frag; i++) {
5865                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5866
5867                 prod = NEXT_TX_BD(prod);
5868                 ring_prod = TX_RING_IDX(prod);
5869                 txbd = &txr->tx_desc_ring[ring_prod];
5870
5871                 len = frag->size;
5872                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5873                         len, PCI_DMA_TODEVICE);
5874                 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
5875                                 mapping, mapping);
5876
5877                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5878                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5879                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5880                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5881
5882         }
5883         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5884
5885         prod = NEXT_TX_BD(prod);
5886         txr->tx_prod_bseq += skb->len;
5887
5888         REG_WR16(bp, txr->tx_bidx_addr, prod);
5889         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5890
5891         mmiowb();
5892
5893         txr->tx_prod = prod;
5894         dev->trans_start = jiffies;
5895
5896         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
5897                 netif_stop_queue(dev);
5898                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
5899                         netif_wake_queue(dev);
5900         }
5901
5902         return NETDEV_TX_OK;
5903 }
5904
5905 /* Called with rtnl_lock */
5906 static int
5907 bnx2_close(struct net_device *dev)
5908 {
5909         struct bnx2 *bp = netdev_priv(dev);
5910         u32 reset_code;
5911
5912         cancel_work_sync(&bp->reset_task);
5913
5914         bnx2_disable_int_sync(bp);
5915         bnx2_napi_disable(bp);
5916         del_timer_sync(&bp->timer);
5917         if (bp->flags & BNX2_FLAG_NO_WOL)
5918                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5919         else if (bp->wol)
5920                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5921         else
5922                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5923         bnx2_reset_chip(bp, reset_code);
5924         bnx2_free_irq(bp);
5925         bnx2_free_skbs(bp);
5926         bnx2_free_mem(bp);
5927         bp->link_up = 0;
5928         netif_carrier_off(bp->dev);
5929         bnx2_set_power_state(bp, PCI_D3hot);
5930         return 0;
5931 }
5932
/* Read a hardware counter out of the statistics block.
 *
 * The chip keeps 64-bit counters as a hi/lo pair of 32-bit words.  On
 * 64-bit hosts both halves are combined; on 32-bit hosts only the low
 * word fits in the unsigned long fields of struct net_device_stats.
 *
 * The expansions are fully parenthesized so the macros compose safely
 * inside larger expressions (e.g. sums of several counters).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5945
/* netdev get_stats hook: fold the chip's DMA'd statistics block into
 * the cached struct net_device_stats and return it.
 *
 * 64-bit hardware counters are read through GET_NET_STATS, which uses
 * the full hi/lo pair on 64-bit hosts and only the low word otherwise.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Statistics block not allocated yet: return the last cached
	 * numbers unchanged. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are not reported on 5706 and 5708 A0
	 * (the counter is skipped because of errata; see the
	 * bnx2_5706_stats_len_arr comment). */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Count both MBUF-pool discards and firmware drops as missed. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6021
6022 /* All ethtool functions called with rtnl_lock */
6023
6024 static int
6025 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6026 {
6027         struct bnx2 *bp = netdev_priv(dev);
6028         int support_serdes = 0, support_copper = 0;
6029
6030         cmd->supported = SUPPORTED_Autoneg;
6031         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6032                 support_serdes = 1;
6033                 support_copper = 1;
6034         } else if (bp->phy_port == PORT_FIBRE)
6035                 support_serdes = 1;
6036         else
6037                 support_copper = 1;
6038
6039         if (support_serdes) {
6040                 cmd->supported |= SUPPORTED_1000baseT_Full |
6041                         SUPPORTED_FIBRE;
6042                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6043                         cmd->supported |= SUPPORTED_2500baseX_Full;
6044
6045         }
6046         if (support_copper) {
6047                 cmd->supported |= SUPPORTED_10baseT_Half |
6048                         SUPPORTED_10baseT_Full |
6049                         SUPPORTED_100baseT_Half |
6050                         SUPPORTED_100baseT_Full |
6051                         SUPPORTED_1000baseT_Full |
6052                         SUPPORTED_TP;
6053
6054         }
6055
6056         spin_lock_bh(&bp->phy_lock);
6057         cmd->port = bp->phy_port;
6058         cmd->advertising = bp->advertising;
6059
6060         if (bp->autoneg & AUTONEG_SPEED) {
6061                 cmd->autoneg = AUTONEG_ENABLE;
6062         }
6063         else {
6064                 cmd->autoneg = AUTONEG_DISABLE;
6065         }
6066
6067         if (netif_carrier_ok(dev)) {
6068                 cmd->speed = bp->line_speed;
6069                 cmd->duplex = bp->duplex;
6070         }
6071         else {
6072                 cmd->speed = -1;
6073                 cmd->duplex = -1;
6074         }
6075         spin_unlock_bh(&bp->phy_lock);
6076
6077         cmd->transceiver = XCVR_INTERNAL;
6078         cmd->phy_address = bp->phy_addr;
6079
6080         return 0;
6081 }
6082
/* ethtool set_settings: validate the requested port/autoneg/speed/
 * duplex combination and, if acceptable, commit it and reprogram the
 * PHY.  Called with rtnl_lock held; takes the phy lock throughout.
 * Returns 0 on success or -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed until all the
	 * validation below has passed. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the media type is only allowed with a remote-PHY
	 * capable device. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertising is rejected on fibre. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable PHY and a fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise every speed the
			 * requested port type supports. */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex (autoneg off). */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports forced 1000/2500 full. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		/* Forcing 1000/2500 on copper is rejected. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6166
6167 static void
6168 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6169 {
6170         struct bnx2 *bp = netdev_priv(dev);
6171
6172         strcpy(info->driver, DRV_MODULE_NAME);
6173         strcpy(info->version, DRV_MODULE_VERSION);
6174         strcpy(info->bus_info, pci_name(bp->pdev));
6175         strcpy(info->fw_version, bp->fw_version);
6176 }
6177
6178 #define BNX2_REGDUMP_LEN                (32 * 1024)
6179
/* ethtool get_regs_len: size in bytes of the register dump produced
 * by bnx2_get_regs. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6185
6186 static void
6187 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6188 {
6189         u32 *p = _p, i, offset;
6190         u8 *orig_p = _p;
6191         struct bnx2 *bp = netdev_priv(dev);
6192         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6193                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6194                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6195                                  0x1040, 0x1048, 0x1080, 0x10a4,
6196                                  0x1400, 0x1490, 0x1498, 0x14f0,
6197                                  0x1500, 0x155c, 0x1580, 0x15dc,
6198                                  0x1600, 0x1658, 0x1680, 0x16d8,
6199                                  0x1800, 0x1820, 0x1840, 0x1854,
6200                                  0x1880, 0x1894, 0x1900, 0x1984,
6201                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6202                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6203                                  0x2000, 0x2030, 0x23c0, 0x2400,
6204                                  0x2800, 0x2820, 0x2830, 0x2850,
6205                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6206                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6207                                  0x4080, 0x4090, 0x43c0, 0x4458,
6208                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6209                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6210                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6211                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6212                                  0x6800, 0x6848, 0x684c, 0x6860,
6213                                  0x6888, 0x6910, 0x8000 };
6214
6215         regs->version = 0;
6216
6217         memset(p, 0, BNX2_REGDUMP_LEN);
6218
6219         if (!netif_running(bp->dev))
6220                 return;
6221
6222         i = 0;
6223         offset = reg_boundaries[0];
6224         p += offset;
6225         while (offset < BNX2_REGDUMP_LEN) {
6226                 *p++ = REG_RD(bp, offset);
6227                 offset += 4;
6228                 if (offset == reg_boundaries[i + 1]) {
6229                         offset = reg_boundaries[i + 2];
6230                         p = (u32 *) (orig_p + offset);
6231                         i += 2;
6232                 }
6233         }
6234 }
6235
6236 static void
6237 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6238 {
6239         struct bnx2 *bp = netdev_priv(dev);
6240
6241         if (bp->flags & BNX2_FLAG_NO_WOL) {
6242                 wol->supported = 0;
6243                 wol->wolopts = 0;
6244         }
6245         else {
6246                 wol->supported = WAKE_MAGIC;
6247                 if (bp->wol)
6248                         wol->wolopts = WAKE_MAGIC;
6249                 else
6250                         wol->wolopts = 0;
6251         }
6252         memset(&wol->sopass, 0, sizeof(wol->sopass));
6253 }
6254
6255 static int
6256 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6257 {
6258         struct bnx2 *bp = netdev_priv(dev);
6259
6260         if (wol->wolopts & ~WAKE_MAGIC)
6261                 return -EINVAL;
6262
6263         if (wol->wolopts & WAKE_MAGIC) {
6264                 if (bp->flags & BNX2_FLAG_NO_WOL)
6265                         return -EINVAL;
6266
6267                 bp->wol = 1;
6268         }
6269         else {
6270                 bp->wol = 0;
6271         }
6272         return 0;
6273 }
6274
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EINVAL if autonegotiation is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the firmware-managed
	 * PHY setup path. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while the 20ms link-down settle time
		 * elapses; msleep cannot be called under a BH lock. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6317
6318 static int
6319 bnx2_get_eeprom_len(struct net_device *dev)
6320 {
6321         struct bnx2 *bp = netdev_priv(dev);
6322
6323         if (bp->flash_info == NULL)
6324                 return 0;
6325
6326         return (int) bp->flash_size;
6327 }
6328
6329 static int
6330 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6331                 u8 *eebuf)
6332 {
6333         struct bnx2 *bp = netdev_priv(dev);
6334         int rc;
6335
6336         /* parameters already validated in ethtool_get_eeprom */
6337
6338         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6339
6340         return rc;
6341 }
6342
6343 static int
6344 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6345                 u8 *eebuf)
6346 {
6347         struct bnx2 *bp = netdev_priv(dev);
6348         int rc;
6349
6350         /* parameters already validated in ethtool_set_eeprom */
6351
6352         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6353
6354         return rc;
6355 }
6356
/* ethtool get_coalesce: report the interrupt coalescing parameters
 * cached in the driver.  Fields this hardware does not support are
 * left zero by the initial memset. */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6378
/* ethtool set_coalesce: clamp the requested coalescing parameters to
 * the hardware limits (0x3ff for tick counters, 0xff for frame-count
 * trip points), store them, and re-initialize the NIC (if running) so
 * the new values take effect. */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* On the 5708, statistics ticks must be 0 or exactly 1 second. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* The new values only reach the chip via a re-init. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6427
/* ethtool get_ringparam: report maximum and currently configured
 * RX/TX ring sizes.  The "jumbo" fields map to the RX page ring. */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6444
/* Resize the RX/TX rings.  If the interface is up, the chip is reset
 * and all buffers/memory freed first, then memory is reallocated and
 * the NIC restarted with the new sizes.
 *
 * NOTE(review): if bnx2_alloc_mem() fails here, the function returns
 * the error with the interface left stopped and no rings allocated —
 * verify the callers can cope with that state.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6469
6470 static int
6471 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6472 {
6473         struct bnx2 *bp = netdev_priv(dev);
6474         int rc;
6475
6476         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6477                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6478                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6479
6480                 return -EINVAL;
6481         }
6482         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6483         return rc;
6484 }
6485
6486 static void
6487 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6488 {
6489         struct bnx2 *bp = netdev_priv(dev);
6490
6491         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6492         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6493         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6494 }
6495
6496 static int
6497 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6498 {
6499         struct bnx2 *bp = netdev_priv(dev);
6500
6501         bp->req_flow_ctrl = 0;
6502         if (epause->rx_pause)
6503                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6504         if (epause->tx_pause)
6505                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6506
6507         if (epause->autoneg) {
6508                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6509         }
6510         else {
6511                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6512         }
6513
6514         spin_lock_bh(&bp->phy_lock);
6515
6516         bnx2_setup_phy(bp, bp->phy_port);
6517
6518         spin_unlock_bh(&bp->phy_lock);
6519
6520         return 0;
6521 }
6522
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6530
/* ethtool set_rx_csum: enable/disable RX checksum offload.  Only the
 * cached flag is updated here; no hardware registers are touched. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6539
6540 static int
6541 bnx2_set_tso(struct net_device *dev, u32 data)
6542 {
6543         struct bnx2 *bp = netdev_priv(dev);
6544
6545         if (data) {
6546                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6547                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6548                         dev->features |= NETIF_F_TSO6;
6549         } else
6550                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6551                                    NETIF_F_TSO_ECN);
6552         return 0;
6553 }
6554
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  Entry order must stay in sync with
 * bnx2_stats_offset_arr and the per-chip counter-length arrays below. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6607
/* Convert a statistics_block field name into its 32-bit word offset. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets into struct statistics_block for each ethtool stat;
 * parallel to bnx2_stats_str_arr (same order, same count). */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6658
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-stat counter widths on the 5706; a 0 entry means the stat is
 * skipped (reported as 0).  Presumably 8 = 64-bit hi/lo counter and
 * 4 = 32-bit counter — matches the _hi fields in the offset array.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6669
/* Per-counter width table for all other chip revisions.
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = counter skipped;
 * consumed by bnx2_get_ethtool_stats().
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6677
#define BNX2_NUM_TESTS 6

/* Names reported to ethtool for each self-test slot; index order must
 * match the buf[] indices filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6690
6691 static int
6692 bnx2_get_sset_count(struct net_device *dev, int sset)
6693 {
6694         switch (sset) {
6695         case ETH_SS_TEST:
6696                 return BNX2_NUM_TESTS;
6697         case ETH_SS_STATS:
6698                 return BNX2_NUM_STATS;
6699         default:
6700                 return -EOPNOTSUPP;
6701         }
6702 }
6703
/* ethtool self-test handler.  Offline tests (register, memory, loopback)
 * quiesce the NIC and reset the chip into diagnostic mode first; online
 * tests (NVRAM, interrupt, link) run afterwards in either mode.  A failed
 * test sets its buf[] slot non-zero and raises ETH_TEST_FL_FAILED; slot
 * order matches bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Stop traffic and put the chip into diag mode before the
		 * destructive offline tests; all rx/tx buffers are freed.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Bring the chip back: plain reset if the interface is down,
		 * otherwise full re-init and restart of the datapath.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6759
6760 static void
6761 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6762 {
6763         switch (stringset) {
6764         case ETH_SS_STATS:
6765                 memcpy(buf, bnx2_stats_str_arr,
6766                         sizeof(bnx2_stats_str_arr));
6767                 break;
6768         case ETH_SS_TEST:
6769                 memcpy(buf, bnx2_tests_str_arr,
6770                         sizeof(bnx2_tests_str_arr));
6771                 break;
6772         }
6773 }
6774
6775 static void
6776 bnx2_get_ethtool_stats(struct net_device *dev,
6777                 struct ethtool_stats *stats, u64 *buf)
6778 {
6779         struct bnx2 *bp = netdev_priv(dev);
6780         int i;
6781         u32 *hw_stats = (u32 *) bp->stats_blk;
6782         u8 *stats_len_arr = NULL;
6783
6784         if (hw_stats == NULL) {
6785                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6786                 return;
6787         }
6788
6789         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6790             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6791             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6792             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6793                 stats_len_arr = bnx2_5706_stats_len_arr;
6794         else
6795                 stats_len_arr = bnx2_5708_stats_len_arr;
6796
6797         for (i = 0; i < BNX2_NUM_STATS; i++) {
6798                 if (stats_len_arr[i] == 0) {
6799                         /* skip this counter */
6800                         buf[i] = 0;
6801                         continue;
6802                 }
6803                 if (stats_len_arr[i] == 4) {
6804                         /* 4-byte counter */
6805                         buf[i] = (u64)
6806                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6807                         continue;
6808                 }
6809                 /* 8-byte counter */
6810                 buf[i] = (((u64) *(hw_stats +
6811                                         bnx2_stats_offset_arr[i])) << 32) +
6812                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6813         }
6814 }
6815
6816 static int
6817 bnx2_phys_id(struct net_device *dev, u32 data)
6818 {
6819         struct bnx2 *bp = netdev_priv(dev);
6820         int i;
6821         u32 save;
6822
6823         if (data == 0)
6824                 data = 2;
6825
6826         save = REG_RD(bp, BNX2_MISC_CFG);
6827         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6828
6829         for (i = 0; i < (data * 2); i++) {
6830                 if ((i % 2) == 0) {
6831                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6832                 }
6833                 else {
6834                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6835                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6836                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6837                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6838                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6839                                 BNX2_EMAC_LED_TRAFFIC);
6840                 }
6841                 msleep_interruptible(500);
6842                 if (signal_pending(current))
6843                         break;
6844         }
6845         REG_WR(bp, BNX2_EMAC_LED, 0);
6846         REG_WR(bp, BNX2_MISC_CFG, save);
6847         return 0;
6848 }
6849
6850 static int
6851 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6852 {
6853         struct bnx2 *bp = netdev_priv(dev);
6854
6855         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6856                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6857         else
6858                 return (ethtool_op_set_tx_csum(dev, data));
6859 }
6860
/* ethtool entry points; registered on the netdev in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6891
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  Direct
 * PHY register access is refused when a remote PHY owns the link, and
 * requires the interface to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes PHY register access. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6945
6946 /* Called with rtnl_lock */
6947 static int
6948 bnx2_change_mac_addr(struct net_device *dev, void *p)
6949 {
6950         struct sockaddr *addr = p;
6951         struct bnx2 *bp = netdev_priv(dev);
6952
6953         if (!is_valid_ether_addr(addr->sa_data))
6954                 return -EINVAL;
6955
6956         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6957         if (netif_running(dev))
6958                 bnx2_set_mac_addr(bp);
6959
6960         return 0;
6961 }
6962
6963 /* Called with rtnl_lock */
6964 static int
6965 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6966 {
6967         struct bnx2 *bp = netdev_priv(dev);
6968
6969         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6970                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6971                 return -EINVAL;
6972
6973         dev->mtu = new_mtu;
6974         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6975 }
6976
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ line disabled so
 * netconsole/kgdb can drain the device without interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6988
6989 static void __devinit
6990 bnx2_get_5709_media(struct bnx2 *bp)
6991 {
6992         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6993         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6994         u32 strap;
6995
6996         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6997                 return;
6998         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6999                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7000                 return;
7001         }
7002
7003         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7004                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7005         else
7006                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7007
7008         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7009                 switch (strap) {
7010                 case 0x4:
7011                 case 0x5:
7012                 case 0x6:
7013                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7014                         return;
7015                 }
7016         } else {
7017                 switch (strap) {
7018                 case 0x1:
7019                 case 0x2:
7020                 case 0x4:
7021                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7022                         return;
7023                 }
7024         }
7025 }
7026
/* Probe-time helper: read the PCI misc status register to classify the
 * bus (PCI vs PCI-X, 32- vs 64-bit, clock speed) and record the result
 * in bp->flags and bp->bus_speed_mhz for later reporting.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: only 33 vs 66 MHz is detectable. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7078
7079 static int __devinit
7080 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7081 {
7082         struct bnx2 *bp;
7083         unsigned long mem_len;
7084         int rc, i, j;
7085         u32 reg;
7086         u64 dma_mask, persist_dma_mask;
7087
7088         SET_NETDEV_DEV(dev, &pdev->dev);
7089         bp = netdev_priv(dev);
7090
7091         bp->flags = 0;
7092         bp->phy_flags = 0;
7093
7094         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7095         rc = pci_enable_device(pdev);
7096         if (rc) {
7097                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7098                 goto err_out;
7099         }
7100
7101         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7102                 dev_err(&pdev->dev,
7103                         "Cannot find PCI device base address, aborting.\n");
7104                 rc = -ENODEV;
7105                 goto err_out_disable;
7106         }
7107
7108         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7109         if (rc) {
7110                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7111                 goto err_out_disable;
7112         }
7113
7114         pci_set_master(pdev);
7115         pci_save_state(pdev);
7116
7117         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7118         if (bp->pm_cap == 0) {
7119                 dev_err(&pdev->dev,
7120                         "Cannot find power management capability, aborting.\n");
7121                 rc = -EIO;
7122                 goto err_out_release;
7123         }
7124
7125         bp->dev = dev;
7126         bp->pdev = pdev;
7127
7128         spin_lock_init(&bp->phy_lock);
7129         spin_lock_init(&bp->indirect_lock);
7130         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7131
7132         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7133         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7134         dev->mem_end = dev->mem_start + mem_len;
7135         dev->irq = pdev->irq;
7136
7137         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7138
7139         if (!bp->regview) {
7140                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7141                 rc = -ENOMEM;
7142                 goto err_out_release;
7143         }
7144
7145         /* Configure byte swap and enable write to the reg_window registers.
7146          * Rely on CPU to do target byte swapping on big endian systems
7147          * The chip's target access swapping will not swap all accesses
7148          */
7149         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7150                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7151                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7152
7153         bnx2_set_power_state(bp, PCI_D0);
7154
7155         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7156
7157         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7158                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7159                         dev_err(&pdev->dev,
7160                                 "Cannot find PCIE capability, aborting.\n");
7161                         rc = -EIO;
7162                         goto err_out_unmap;
7163                 }
7164                 bp->flags |= BNX2_FLAG_PCIE;
7165                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7166                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7167         } else {
7168                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7169                 if (bp->pcix_cap == 0) {
7170                         dev_err(&pdev->dev,
7171                                 "Cannot find PCIX capability, aborting.\n");
7172                         rc = -EIO;
7173                         goto err_out_unmap;
7174                 }
7175         }
7176
7177         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7178                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7179                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7180         }
7181
7182         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7183                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7184                         bp->flags |= BNX2_FLAG_MSI_CAP;
7185         }
7186
7187         /* 5708 cannot support DMA addresses > 40-bit.  */
7188         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7189                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7190         else
7191                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7192
7193         /* Configure DMA attributes. */
7194         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7195                 dev->features |= NETIF_F_HIGHDMA;
7196                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7197                 if (rc) {
7198                         dev_err(&pdev->dev,
7199                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7200                         goto err_out_unmap;
7201                 }
7202         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7203                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7204                 goto err_out_unmap;
7205         }
7206
7207         if (!(bp->flags & BNX2_FLAG_PCIE))
7208                 bnx2_get_pci_speed(bp);
7209
7210         /* 5706A0 may falsely detect SERR and PERR. */
7211         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7212                 reg = REG_RD(bp, PCI_COMMAND);
7213                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7214                 REG_WR(bp, PCI_COMMAND, reg);
7215         }
7216         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7217                 !(bp->flags & BNX2_FLAG_PCIX)) {
7218
7219                 dev_err(&pdev->dev,
7220                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7221                 goto err_out_unmap;
7222         }
7223
7224         bnx2_init_nvram(bp);
7225
7226         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7227
7228         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7229             BNX2_SHM_HDR_SIGNATURE_SIG) {
7230                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7231
7232                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7233         } else
7234                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7235
7236         /* Get the permanent MAC address.  First we need to make sure the
7237          * firmware is actually running.
7238          */
7239         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7240
7241         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7242             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7243                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7244                 rc = -ENODEV;
7245                 goto err_out_unmap;
7246         }
7247
7248         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7249         for (i = 0, j = 0; i < 3; i++) {
7250                 u8 num, k, skip0;
7251
7252                 num = (u8) (reg >> (24 - (i * 8)));
7253                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7254                         if (num >= k || !skip0 || k == 1) {
7255                                 bp->fw_version[j++] = (num / k) + '0';
7256                                 skip0 = 0;
7257                         }
7258                 }
7259                 if (i != 2)
7260                         bp->fw_version[j++] = '.';
7261         }
7262         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7263         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7264                 bp->wol = 1;
7265
7266         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7267                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7268
7269                 for (i = 0; i < 30; i++) {
7270                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7271                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7272                                 break;
7273                         msleep(10);
7274                 }
7275         }
7276         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7277         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7278         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7279             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7280                 int i;
7281                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7282
7283                 bp->fw_version[j++] = ' ';
7284                 for (i = 0; i < 3; i++) {
7285                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7286                         reg = swab32(reg);
7287                         memcpy(&bp->fw_version[j], &reg, 4);
7288                         j += 4;
7289                 }
7290         }
7291
7292         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7293         bp->mac_addr[0] = (u8) (reg >> 8);
7294         bp->mac_addr[1] = (u8) reg;
7295
7296         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7297         bp->mac_addr[2] = (u8) (reg >> 24);
7298         bp->mac_addr[3] = (u8) (reg >> 16);
7299         bp->mac_addr[4] = (u8) (reg >> 8);
7300         bp->mac_addr[5] = (u8) reg;
7301
7302         bp->tx_ring_size = MAX_TX_DESC_CNT;
7303         bnx2_set_rx_ring_size(bp, 255);
7304
7305         bp->rx_csum = 1;
7306
7307         bp->tx_quick_cons_trip_int = 20;
7308         bp->tx_quick_cons_trip = 20;
7309         bp->tx_ticks_int = 80;
7310         bp->tx_ticks = 80;
7311
7312         bp->rx_quick_cons_trip_int = 6;
7313         bp->rx_quick_cons_trip = 6;
7314         bp->rx_ticks_int = 18;
7315         bp->rx_ticks = 18;
7316
7317         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7318
7319         bp->timer_interval =  HZ;
7320         bp->current_interval =  HZ;
7321
7322         bp->phy_addr = 1;
7323
7324         /* Disable WOL support if we are running on a SERDES chip. */
7325         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7326                 bnx2_get_5709_media(bp);
7327         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7328                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7329
7330         bp->phy_port = PORT_TP;
7331         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7332                 bp->phy_port = PORT_FIBRE;
7333                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7334                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7335                         bp->flags |= BNX2_FLAG_NO_WOL;
7336                         bp->wol = 0;
7337                 }
7338                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7339                         /* Don't do parallel detect on this board because of
7340                          * some board problems.  The link will not go down
7341                          * if we do parallel detect.
7342                          */
7343                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7344                             pdev->subsystem_device == 0x310c)
7345                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7346                 } else {
7347                         bp->phy_addr = 2;
7348                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7349                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7350                 }
7351                 bnx2_init_remote_phy(bp);
7352
7353         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7354                    CHIP_NUM(bp) == CHIP_NUM_5708)
7355                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7356         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7357                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7358                   CHIP_REV(bp) == CHIP_REV_Bx))
7359                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7360
7361         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7362             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7363             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7364                 bp->flags |= BNX2_FLAG_NO_WOL;
7365                 bp->wol = 0;
7366         }
7367
7368         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7369                 bp->tx_quick_cons_trip_int =
7370                         bp->tx_quick_cons_trip;
7371                 bp->tx_ticks_int = bp->tx_ticks;
7372                 bp->rx_quick_cons_trip_int =
7373                         bp->rx_quick_cons_trip;
7374                 bp->rx_ticks_int = bp->rx_ticks;
7375                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7376                 bp->com_ticks_int = bp->com_ticks;
7377                 bp->cmd_ticks_int = bp->cmd_ticks;
7378         }
7379
7380         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7381          *
7382          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7383          * with byte enables disabled on the unused 32-bit word.  This is legal
7384          * but causes problems on the AMD 8132 which will eventually stop
7385          * responding after a while.
7386          *
7387          * AMD believes this incompatibility is unique to the 5706, and
7388          * prefers to locally disable MSI rather than globally disabling it.
7389          */
7390         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7391                 struct pci_dev *amd_8132 = NULL;
7392
7393                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7394                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7395                                                   amd_8132))) {
7396
7397                         if (amd_8132->revision >= 0x10 &&
7398                             amd_8132->revision <= 0x13) {
7399                                 disable_msi = 1;
7400                                 pci_dev_put(amd_8132);
7401                                 break;
7402                         }
7403                 }
7404         }
7405
7406         bnx2_set_default_link(bp);
7407         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7408
7409         init_timer(&bp->timer);
7410         bp->timer.expires = RUN_AT(bp->timer_interval);
7411         bp->timer.data = (unsigned long) bp;
7412         bp->timer.function = bnx2_timer;
7413
7414         return 0;
7415
7416 err_out_unmap:
7417         if (bp->regview) {
7418                 iounmap(bp->regview);
7419                 bp->regview = NULL;
7420         }
7421
7422 err_out_release:
7423         pci_release_regions(pdev);
7424
7425 err_out_disable:
7426         pci_disable_device(pdev);
7427         pci_set_drvdata(pdev, NULL);
7428
7429 err_out:
7430         return rc;
7431 }
7432
7433 static char * __devinit
7434 bnx2_bus_string(struct bnx2 *bp, char *str)
7435 {
7436         char *s = str;
7437
7438         if (bp->flags & BNX2_FLAG_PCIE) {
7439                 s += sprintf(s, "PCI Express");
7440         } else {
7441                 s += sprintf(s, "PCI");
7442                 if (bp->flags & BNX2_FLAG_PCIX)
7443                         s += sprintf(s, "-X");
7444                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7445                         s += sprintf(s, " 32-bit");
7446                 else
7447                         s += sprintf(s, " 64-bit");
7448                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7449         }
7450         return str;
7451 }
7452
7453 static void __devinit
7454 bnx2_init_napi(struct bnx2 *bp)
7455 {
7456         int i;
7457
7458         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7459                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7460                 int (*poll)(struct napi_struct *, int);
7461
7462                 if (i == 0)
7463                         poll = bnx2_poll;
7464                 else
7465                         poll = bnx2_tx_poll;
7466
7467                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7468                 bnapi->bp = bp;
7469         }
7470 }
7471
/* PCI probe entry point: allocate the net device, initialize the board,
 * wire up the net_device operations and feature flags, and register the
 * device with the networking core.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired here (netdev, PCI regions, mapped BARs) is released.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Bring up PCI resources, map BARs and read NVRAM/MAC config. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from NVRAM by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* 5709 additionally supports IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() acquired. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7559
/* PCI remove entry point: tear down in reverse order of probe.
 * Scheduled driver work is flushed first so no deferred task runs
 * against a device that is being unregistered.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7578
/* Power-management suspend: quiesce the NIC, notify firmware of the
 * unload reason (link-down / WOL / no-WOL), free all ring buffers and
 * drop into the PCI power state chosen for @state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the wake-on-LAN policy. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7609
/* Power-management resume: restore PCI config space, return to D0,
 * re-initialize the chip and restart the interface if it was running
 * at suspend time.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7626
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* rtnl_lock serializes against open/close and other config paths. */
	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
7656
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* The device was disabled in bnx2_io_error_detected(); bring it
	 * back before touching config space.
	 */
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
7686
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	/* Re-attach undoes the detach done in bnx2_io_error_detected(). */
	netif_device_attach(dev);
	rtnl_unlock();
}
7706
/* PCI Advanced Error Reporting recovery callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7712
/* PCI driver registration: probe/remove, PM hooks and AER handlers. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7722
/* Module init: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7727
/* Module exit: unregister the PCI driver; the core calls remove()
 * for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7732
/* Module entry/exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);

