bnx2: Use const on flash_table structure.
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
/* Driver identity strings used in log messages and ethtool output. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0.1"
#define DRV_MODULE_RELDATE      "May 6, 2009"
/* Firmware images fetched via request_firmware(); the "06" and "09"
 * variants presumably map to the two chip generations (5706/5708 vs
 * 5709/5716) -- confirm against the firmware-selection code.
 */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-4.6.15.fw"

/* Convert a relative delay in jiffies to an absolute timer expiry. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
85
86 static int disable_msi = 0;
87
88 module_param(disable_msi, int, 0);
89 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90
/* Supported board types.  Each value is used as the driver_data index
 * into board_info[] below, so the two lists must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
104
/* Human-readable board names, indexed by board_t, above (keep the two
 * lists in sync).  The member is const-qualified: every entry points at
 * a string literal and is never written through.
 */
static struct {
	const char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
121
/* PCI ID table.  The HP NC370 entries match on subsystem vendor/device
 * and must come before the catch-all PCI_ANY_ID entries for the same
 * Broadcom device ID, since the table is scanned in order.  The final
 * field is the board_t used to index board_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716/5716S -- no PCI_DEVICE_ID_* macro. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
147
/* NVRAM device descriptors, selected at probe time by matching the
 * chip's flash strapping (first field of each entry).  The five hex
 * words per entry are raw register programming values for the NVM
 * interface -- presumably strapping plus config/write setup words;
 * confirm against struct flash_spec in bnx2.h before editing.  The
 * "Expansion entry" rows are placeholders for straps with no known
 * device.  Order and values are hardware-defined: do not reorder.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
236
/* 5709-family NVRAM descriptor.  The 5709 is not strapping-selected
 * like the flash_table[] entries above, so a single fixed spec is used.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
245
246 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247
248 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
249 {
250         u32 diff;
251
252         smp_mb();
253
254         /* The ring uses 256 indices for 255 entries, one of them
255          * needs to be skipped.
256          */
257         diff = txr->tx_prod - txr->tx_cons;
258         if (unlikely(diff >= TX_DESC_CNT)) {
259                 diff &= 0xffff;
260                 if (diff == TX_DESC_CNT)
261                         diff = MAX_TX_DESC_CNT;
262         }
263         return (bp->tx_ring_size - diff);
264 }
265
/* Indirect register read through the PCICFG register window.
 * indirect_lock serializes the address/data register pair against
 * concurrent indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
277
/* Indirect register write through the PCICFG register window; see
 * bnx2_reg_rd_ind() for the locking rationale.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
286
/* Write one word into the driver/firmware shared memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
292
293 static u32
294 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
295 {
296         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
297 }
298
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * 5709-family chips use a posted data/control register pair: the write
 * request is issued and then polled (up to 5 x 5us) until the chip
 * clears WRITE_REQ.  Older chips take a plain address/data pair.
 * indirect_lock serializes against other indirect accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			/* val is reused here as a scratch readback. */
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
322
323 #ifdef BCM_CNIC
/* cnic -> bnx2 control hook: perform register/context accesses on
 * behalf of the attached cnic driver.
 *
 * Returns 0 on success, -EINVAL for an unrecognized command.
 */
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
345
/* Fill in the irq/status-block info that the cnic driver will use.
 *
 * With MSI-X, cnic gets the vector at index bp->irq_nvecs (the slot
 * after the ethernet vectors) and the matching per-vector status block;
 * otherwise it shares vector 0 and the base status block, and
 * cnic_tag/cnic_present tell the shared ISR path to service cnic too.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out at BNX2_SBLK_MSIX_ALIGN_SIZE strides
	 * from the base MSI block.
	 */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
372
/* Called by the cnic driver to attach to this device.
 *
 * bp->cnic_data is stored before rcu_assign_pointer() publishes the
 * ops, so RCU readers that observe a non-NULL cnic_ops also see valid
 * cnic_data.
 *
 * Returns 0 on success, -EINVAL for NULL @ops, -EBUSY if a cnic driver
 * is already registered.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
395
/* Detach the cnic driver.  After clearing cnic_ops, synchronize_rcu()
 * waits for any in-flight rcu_read_lock() sections (see
 * bnx2_cnic_stop/start) to finish before returning, so the caller may
 * then safely free its state.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	synchronize_rcu();
	return 0;
}
408
409 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
410 {
411         struct bnx2 *bp = netdev_priv(dev);
412         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
413
414         cp->drv_owner = THIS_MODULE;
415         cp->chip_id = bp->chip_id;
416         cp->pdev = bp->pdev;
417         cp->io_base = bp->regview;
418         cp->drv_ctl = bnx2_drv_ctl;
419         cp->drv_register_cnic = bnx2_register_cnic;
420         cp->drv_unregister_cnic = bnx2_unregister_cnic;
421
422         return cp;
423 }
424 EXPORT_SYMBOL(bnx2_cnic_probe);
425
/* Tell the attached cnic driver, if any, to stop.  cnic_ops is read
 * under rcu_read_lock() so it cannot be torn down mid-call (see
 * bnx2_unregister_cnic()).
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	rcu_read_unlock();
}
440
/* Tell the attached cnic driver, if any, to start.  In shared-vector
 * (non-MSI-X) mode, cnic_tag is resynced to the latest status index
 * first so cnic does not process stale status-block events.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	rcu_read_unlock();
}
460
461 #else
462
/* CNIC support not compiled in: stop hook is a no-op. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
467
/* CNIC support not compiled in: start hook is a no-op. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
472
473 #endif
474
475 static int
476 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
477 {
478         u32 val1;
479         int i, ret;
480
481         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
482                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
483                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
484
485                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
486                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
487
488                 udelay(40);
489         }
490
491         val1 = (bp->phy_addr << 21) | (reg << 16) |
492                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
493                 BNX2_EMAC_MDIO_COMM_START_BUSY;
494         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
495
496         for (i = 0; i < 50; i++) {
497                 udelay(10);
498
499                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
500                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
501                         udelay(5);
502
503                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
504                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
505
506                         break;
507                 }
508         }
509
510         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
511                 *val = 0x0;
512                 ret = -EBUSY;
513         }
514         else {
515                 *val = val1;
516                 ret = 0;
517         }
518
519         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
520                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
521                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
522
523                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
524                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
525
526                 udelay(40);
527         }
528
529         return ret;
530 }
531
/* Write the 16-bit @val to PHY register @reg over the MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is paused around the manual
 * access, and command completion is polled for up to 50 x 10us.
 *
 * Returns 0 on success, -EBUSY if the command never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write command with the data in the low 16 bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
580
581 static void
582 bnx2_disable_int(struct bnx2 *bp)
583 {
584         int i;
585         struct bnx2_napi *bnapi;
586
587         for (i = 0; i < bp->irq_nvecs; i++) {
588                 bnapi = &bp->bnx2_napi[i];
589                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
590                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
591         }
592         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
593 }
594
/* Unmask interrupts on every active vector.  Each vector is acked
 * twice -- first with MASK_INT still set, then without it -- at its
 * last seen status index; COAL_NOW then asks the host coalescing block
 * to generate an interrupt immediately if events are pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
615
/* Mask interrupts and wait for any in-flight handlers to finish.
 * intr_sem is raised first so handlers that still run see it nonzero
 * (presumably checked in the ISR path -- not visible in this chunk).
 * If the device is not running there are no irqs to sync.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
629
630 static void
631 bnx2_napi_disable(struct bnx2 *bp)
632 {
633         int i;
634
635         for (i = 0; i < bp->irq_nvecs; i++)
636                 napi_disable(&bp->bnx2_napi[i].napi);
637 }
638
639 static void
640 bnx2_napi_enable(struct bnx2 *bp)
641 {
642         int i;
643
644         for (i = 0; i < bp->irq_nvecs; i++)
645                 napi_enable(&bp->bnx2_napi[i].napi);
646 }
647
/* Quiesce the data path: stop cnic, mask and sync interrupts, then
 * stop NAPI and the tx queues.  trans_start is refreshed so the
 * stopped queues do not trip the tx watchdog while we are down.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
659
660 static void
661 bnx2_netif_start(struct bnx2 *bp)
662 {
663         if (atomic_dec_and_test(&bp->intr_sem)) {
664                 if (netif_running(bp->dev)) {
665                         netif_tx_wake_all_queues(bp->dev);
666                         bnx2_napi_enable(bp);
667                         bnx2_enable_int(bp);
668                         bnx2_cnic_start(bp);
669                 }
670         }
671 }
672
/* Free per-queue tx memory: the DMA-coherent descriptor ring and the
 * kmalloc'd software buffer ring.  Safe on partially allocated state:
 * NULL descriptor rings are skipped and kfree(NULL) is a no-op.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
692
/* Free per-queue rx memory: the DMA-coherent descriptor rings for both
 * the normal and the page rings, plus the vmalloc'd software buffer
 * rings.  Pointers are NULLed so a repeat call is harmless.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
724
/* Allocate per-queue tx memory: a zeroed software buffer ring and a
 * DMA-coherent descriptor ring.
 *
 * Returns 0 or -ENOMEM.  On failure, partial allocations are left in
 * place; the caller (bnx2_alloc_mem) cleans up via bnx2_free_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
746
/* Allocate per-queue rx memory: vmalloc'd (then zeroed) software buffer
 * rings and DMA-coherent descriptor rings, for both the normal rx ring
 * and -- when rx_pg_ring_size is nonzero -- the page ring.
 *
 * Returns 0 or -ENOMEM.  On failure, partial allocations are left in
 * place; the caller (bnx2_alloc_mem) cleans up via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
795
/* Release everything bnx2_alloc_mem() set up: tx/rx rings, the 5709
 * context pages, and the combined status + statistics block.  Safe to
 * call on a partially allocated device (used as the error path of
 * bnx2_alloc_mem).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* The statistics block shares the status-block allocation, so
	 * freeing it also invalidates stats_blk.
	 */
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
821
/* Allocate all host memory the device needs: the combined status +
 * statistics block, the 5709 context pages, and the rx/tx rings.
 *
 * Returns 0 or -ENOMEM; on any failure everything already allocated is
 * released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* Room for one aligned status block per MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block... */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* ...and each further MSI-X vector gets its own aligned
		 * slice of the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host-resident context memory. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
898
/* Encode the current link state as a BNX2_LINK_STATUS_* word and post it
 * to the bootcode through shared memory so the firmware stays in sync
 * with the driver's view of the link.  Does nothing when the PHY is
 * owned by the remote management firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Fold speed + duplex into a single status code. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR bits are latched; read twice so the second
			 * read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
957
958 static char *
959 bnx2_xceiver_str(struct bnx2 *bp)
960 {
961         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
962                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
963                  "Copper"));
964 }
965
966 static void
967 bnx2_report_link(struct bnx2 *bp)
968 {
969         if (bp->link_up) {
970                 netif_carrier_on(bp->dev);
971                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
972                        bnx2_xceiver_str(bp));
973
974                 printk("%d Mbps ", bp->line_speed);
975
976                 if (bp->duplex == DUPLEX_FULL)
977                         printk("full duplex");
978                 else
979                         printk("half duplex");
980
981                 if (bp->flow_ctrl) {
982                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
983                                 printk(", receive ");
984                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
985                                         printk("& transmit ");
986                         }
987                         else {
988                                 printk(", transmit ");
989                         }
990                         printk("flow control ON");
991                 }
992                 printk("\n");
993         }
994         else {
995                 netif_carrier_off(bp->dev);
996                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
997                        bnx2_xceiver_str(bp));
998         }
999
1000         bnx2_report_fw_link(bp);
1001 }
1002
/* Resolve the pause (flow control) configuration after link-up.
 *
 * When flow control is not being autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the local and
 * link-partner pause advertisements are combined per the resolution
 * rules of IEEE 802.3 Table 28B-3.  The result is left in bp->flow_ctrl.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Pause is forced, not negotiated. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state directly. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* 1000BASE-X uses different advertisement bit positions; translate
	 * them to the copper PAUSE bits so one resolution path suffices.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1078
1079 static int
1080 bnx2_5709s_linkup(struct bnx2 *bp)
1081 {
1082         u32 val, speed;
1083
1084         bp->link_up = 1;
1085
1086         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1087         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1088         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1089
1090         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1091                 bp->line_speed = bp->req_line_speed;
1092                 bp->duplex = bp->req_duplex;
1093                 return 0;
1094         }
1095         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1096         switch (speed) {
1097                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1098                         bp->line_speed = SPEED_10;
1099                         break;
1100                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1101                         bp->line_speed = SPEED_100;
1102                         break;
1103                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1104                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1105                         bp->line_speed = SPEED_1000;
1106                         break;
1107                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1108                         bp->line_speed = SPEED_2500;
1109                         break;
1110         }
1111         if (val & MII_BNX2_GP_TOP_AN_FD)
1112                 bp->duplex = DUPLEX_FULL;
1113         else
1114                 bp->duplex = DUPLEX_HALF;
1115         return 0;
1116 }
1117
1118 static int
1119 bnx2_5708s_linkup(struct bnx2 *bp)
1120 {
1121         u32 val;
1122
1123         bp->link_up = 1;
1124         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1125         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1126                 case BCM5708S_1000X_STAT1_SPEED_10:
1127                         bp->line_speed = SPEED_10;
1128                         break;
1129                 case BCM5708S_1000X_STAT1_SPEED_100:
1130                         bp->line_speed = SPEED_100;
1131                         break;
1132                 case BCM5708S_1000X_STAT1_SPEED_1G:
1133                         bp->line_speed = SPEED_1000;
1134                         break;
1135                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1136                         bp->line_speed = SPEED_2500;
1137                         break;
1138         }
1139         if (val & BCM5708S_1000X_STAT1_FD)
1140                 bp->duplex = DUPLEX_FULL;
1141         else
1142                 bp->duplex = DUPLEX_HALF;
1143
1144         return 0;
1145 }
1146
1147 static int
1148 bnx2_5706s_linkup(struct bnx2 *bp)
1149 {
1150         u32 bmcr, local_adv, remote_adv, common;
1151
1152         bp->link_up = 1;
1153         bp->line_speed = SPEED_1000;
1154
1155         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1156         if (bmcr & BMCR_FULLDPLX) {
1157                 bp->duplex = DUPLEX_FULL;
1158         }
1159         else {
1160                 bp->duplex = DUPLEX_HALF;
1161         }
1162
1163         if (!(bmcr & BMCR_ANENABLE)) {
1164                 return 0;
1165         }
1166
1167         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1168         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1169
1170         common = local_adv & remote_adv;
1171         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1172
1173                 if (common & ADVERTISE_1000XFULL) {
1174                         bp->duplex = DUPLEX_FULL;
1175                 }
1176                 else {
1177                         bp->duplex = DUPLEX_HALF;
1178                 }
1179         }
1180
1181         return 0;
1182 }
1183
/* Determine speed and duplex after a copper PHY link-up.
 *
 * With autoneg enabled, 1000BASE-T results are checked first, then the
 * 10/100 advertisement registers.  Without autoneg, the forced BMCR
 * settings are reported.  Always returns 0; on an unresolvable
 * negotiation the link is marked down.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000BASE-T control/status pair. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's LPA_1000* bits sit two positions above our
		 * ADVERTISE_1000* bits; shift to compare like with like.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to 10/100 registers. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat link as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: speed/duplex are forced via BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1249
/* Program the L2 context type word for one rx ring context.
 *
 * On the 5709, pause frame watermarks (scaled, 4-bit fields) are also
 * packed into the same context word; the low watermark is disabled when
 * tx flow control is off or the ring is too small for it to make sense.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* 0x02 << 8: hardware-specific context setup value carried over
	 * from vendor code — exact meaning undocumented here.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark only matters when we may send pause. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Convert to the scaled units the hardware expects. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; clamp, and disable the low
		 * watermark if the high one rounds to zero.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1285
1286 static void
1287 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1288 {
1289         int i;
1290         u32 cid;
1291
1292         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1293                 if (i == 1)
1294                         cid = RX_RSS_CID;
1295                 bnx2_init_rx_context(bp, cid);
1296         }
1297 }
1298
/* Program the EMAC to match the resolved link parameters: port mode for
 * the current speed, duplex, and rx/tx pause enables.  Also acks the
 * link-change interrupt and, on the 5709, reprograms the rx context
 * watermarks (which depend on the new flow control state).
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000 Mbps half duplex needs a larger slot time. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1366
1367 static void
1368 bnx2_enable_bmsr1(struct bnx2 *bp)
1369 {
1370         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1371             (CHIP_NUM(bp) == CHIP_NUM_5709))
1372                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1373                                MII_BNX2_BLK_ADDR_GP_STATUS);
1374 }
1375
1376 static void
1377 bnx2_disable_bmsr1(struct bnx2 *bp)
1378 {
1379         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1380             (CHIP_NUM(bp) == CHIP_NUM_5709))
1381                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1382                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1383 }
1384
/* Enable 2.5G advertisement in the PHY's UP1 register if the chip is
 * 2.5G capable.
 *
 * Returns 1 if 2.5G was already enabled (nothing changed), 0 if it was
 * just turned on (the link will have to renegotiate), and 0 for chips
 * without 2.5G capability.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709 the UP1 register is in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default block address. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1413
/* Disable 2.5G advertisement in the PHY's UP1 register if the chip is
 * 2.5G capable.
 *
 * Returns 1 if 2.5G had been enabled and was just cleared (the link
 * will have to renegotiate), 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On the 5709 the UP1 register is in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default block address. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1439
1440 static void
1441 bnx2_enable_forced_2g5(struct bnx2 *bp)
1442 {
1443         u32 bmcr;
1444
1445         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1446                 return;
1447
1448         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1449                 u32 val;
1450
1451                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1452                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1453                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1454                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1455                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1456                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1457
1458                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1459                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1460                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1461
1462         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1463                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1464                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1465         }
1466
1467         if (bp->autoneg & AUTONEG_SPEED) {
1468                 bmcr &= ~BMCR_ANENABLE;
1469                 if (bp->req_duplex == DUPLEX_FULL)
1470                         bmcr |= BMCR_FULLDPLX;
1471         }
1472         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1473 }
1474
1475 static void
1476 bnx2_disable_forced_2g5(struct bnx2 *bp)
1477 {
1478         u32 bmcr;
1479
1480         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1481                 return;
1482
1483         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1484                 u32 val;
1485
1486                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1487                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1488                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1489                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1490                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1491
1492                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1493                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1494                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1495
1496         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1497                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1499         }
1500
1501         if (bp->autoneg & AUTONEG_SPEED)
1502                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1503         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1504 }
1505
1506 static void
1507 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1508 {
1509         u32 val;
1510
1511         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1512         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1513         if (start)
1514                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1515         else
1516                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1517 }
1518
/* Re-evaluate link state and reprogram the MAC accordingly.
 *
 * Reads the (latched) link status, applies a 5706 SerDes workaround,
 * dispatches to the chip-specific link-up handler to resolve speed,
 * duplex and flow control, and reports any state change.  Always
 * returns 0.  Caller holds bp->phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY setups: link state is managed by the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read twice so the second read
	 * reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link state from the EMAC status
	 * and the AN debug shadow register instead of trusting BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register: read twice to get the current value. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can retry
		 * at all speeds.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and notify the firmware on an actual state change. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1602
1603 static int
1604 bnx2_reset_phy(struct bnx2 *bp)
1605 {
1606         int i;
1607         u32 reg;
1608
1609         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1610
1611 #define PHY_RESET_MAX_WAIT 100
1612         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1613                 udelay(10);
1614
1615                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1616                 if (!(reg & BMCR_RESET)) {
1617                         udelay(20);
1618                         break;
1619                 }
1620         }
1621         if (i == PHY_RESET_MAX_WAIT) {
1622                 return -EBUSY;
1623         }
1624         return 0;
1625 }
1626
1627 static u32
1628 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1629 {
1630         u32 adv = 0;
1631
1632         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1633                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1634
1635                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1636                         adv = ADVERTISE_1000XPAUSE;
1637                 }
1638                 else {
1639                         adv = ADVERTISE_PAUSE_CAP;
1640                 }
1641         }
1642         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPSE_ASYM;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_ASYM;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         return adv;
1659 }
1660
1661 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1662
/* Configure a firmware-managed (remote) PHY.
 *
 * Translates the driver's autoneg/speed/duplex/pause settings into the
 * BNX2_NETLINK_SET_LINK_* argument word, writes it to the firmware
 * mailbox, and issues the SET_LINK command.  bp->phy_lock is dropped
 * across the firmware handshake (see the sparse annotations).  Always
 * returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: request exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map the pause advertisement bits onto the firmware flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake may sleep-wait; drop the phy_lock
	 * around it.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1721
/* Configure the SerDes PHY from bp->autoneg / bp->req_* settings.
 * Called with bp->phy_lock held; the lock is dropped and reacquired
 * around the msleep() below (hence the __releases/__acquires
 * annotations).  Always returns 0 on the local-PHY paths.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Remote-PHY capable firmware owns the PHY; hand the request off. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * was previously disabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1838
/* All speeds advertised on a fibre (SerDes) port; 2.5G is included only
 * when the PHY is 2.5G capable.  The expansion is fully parenthesized:
 * without the outer parens, a use site such as
 * "ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg" binds the "|" into the
 * else arm of the conditional (| has higher precedence than ?:), so
 * 2.5G-capable ports silently lost ADVERTISED_Autoneg.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* All speeds advertised on a copper (TP) port. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for all gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1853
1854 static void
1855 bnx2_set_default_remote_link(struct bnx2 *bp)
1856 {
1857         u32 link;
1858
1859         if (bp->phy_port == PORT_TP)
1860                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1861         else
1862                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1863
1864         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1865                 bp->req_line_speed = 0;
1866                 bp->autoneg |= AUTONEG_SPEED;
1867                 bp->advertising = ADVERTISED_Autoneg;
1868                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1869                         bp->advertising |= ADVERTISED_10baseT_Half;
1870                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1871                         bp->advertising |= ADVERTISED_10baseT_Full;
1872                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1873                         bp->advertising |= ADVERTISED_100baseT_Half;
1874                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1875                         bp->advertising |= ADVERTISED_100baseT_Full;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1877                         bp->advertising |= ADVERTISED_1000baseT_Full;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1879                         bp->advertising |= ADVERTISED_2500baseX_Full;
1880         } else {
1881                 bp->autoneg = 0;
1882                 bp->advertising = 0;
1883                 bp->req_duplex = DUPLEX_FULL;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1885                         bp->req_line_speed = SPEED_10;
1886                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1887                                 bp->req_duplex = DUPLEX_HALF;
1888                 }
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1890                         bp->req_line_speed = SPEED_100;
1891                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1892                                 bp->req_duplex = DUPLEX_HALF;
1893                 }
1894                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1895                         bp->req_line_speed = SPEED_1000;
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1897                         bp->req_line_speed = SPEED_2500;
1898         }
1899 }
1900
1901 static void
1902 bnx2_set_default_link(struct bnx2 *bp)
1903 {
1904         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1905                 bnx2_set_default_remote_link(bp);
1906                 return;
1907         }
1908
1909         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1910         bp->req_line_speed = 0;
1911         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1912                 u32 reg;
1913
1914                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1915
1916                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1917                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1918                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1919                         bp->autoneg = 0;
1920                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1921                         bp->req_duplex = DUPLEX_FULL;
1922                 }
1923         } else
1924                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1925 }
1926
1927 static void
1928 bnx2_send_heart_beat(struct bnx2 *bp)
1929 {
1930         u32 msg;
1931         u32 addr;
1932
1933         spin_lock(&bp->indirect_lock);
1934         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1935         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1936         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1937         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1938         spin_unlock(&bp->indirect_lock);
1939 }
1940
/* Handle a link event reported by remote-PHY firmware.  Decodes the
 * BNX2_LINK_STATUS shared-memory word into bp->link_up, bp->line_speed,
 * bp->duplex, bp->flow_ctrl and bp->phy_port, then reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it wants a driver pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex and deliberately falls
		 * through to the matching *FULL case to pick up the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: use the forced setting unless both speed
		 * and flow-control autoneg are enabled, in which case take
		 * the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware can switch between serdes and copper. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2017
2018 static int
2019 bnx2_set_remote_link(struct bnx2 *bp)
2020 {
2021         u32 evt_code;
2022
2023         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2024         switch (evt_code) {
2025                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2026                         bnx2_remote_phy_event(bp);
2027                         break;
2028                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2029                 default:
2030                         bnx2_send_heart_beat(bp);
2031                         break;
2032         }
2033         return 0;
2034 }
2035
/* Configure a copper PHY for autonegotiation or forced speed/duplex
 * from bp->autoneg / bp->req_*.  Called with bp->phy_lock held; the
 * lock is dropped and reacquired around the msleep() below.  Always
 * returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits of the current
		 * advertisement so it can be compared to the new one.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising bits into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was previously disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode: build the new BMCR (10 or 100 Mbps only). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2134
2135 static int
2136 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2137 __releases(&bp->phy_lock)
2138 __acquires(&bp->phy_lock)
2139 {
2140         if (bp->loopback == MAC_LOOPBACK)
2141                 return 0;
2142
2143         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2144                 return (bnx2_setup_serdes_phy(bp, port));
2145         }
2146         else {
2147                 return (bnx2_setup_copper_phy(bp));
2148         }
2149 }
2150
/* Initialize the 5709 SerDes PHY.  This PHY uses block-addressed
 * registers (selected via MII_BNX2_BLK_ADDR); the IEEE-compatible
 * registers live at an offset of 0x10, hence the mii_* remapping below.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Remap the generic MII register offsets for this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or withdraw) 2.5G in the over-1G UP1 register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2200
/* Initialize the 5708 SerDes PHY: enable fiber mode and PLL detection,
 * advertise 2.5G when capable, and apply board-specific TX tuning from
 * shared-memory configuration.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with speed auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Errata workaround for early 5708 steppings. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value when shared memory
	 * provides one and the board is a backplane design.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2258
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c accesses target
 * Broadcom shadow registers with vendor-specified values -- presumably
 * controlling extended packet length for jumbo frames; confirm against
 * Broadcom PHY documentation.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2296
/* Initialize a copper PHY: apply the CRC-fix and early-DAC errata
 * workarounds when flagged, set or clear the extended packet length
 * bit according to MTU, and enable ethernet@wirespeed.  The raw
 * 0x10/0x15/0x17/0x18 accesses target Broadcom shadow/expansion
 * registers with vendor-specified values.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor-specified expansion-register sequence working around
	 * a CRC problem on affected boards.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 (disable early DAC). */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2348
2349
2350 static int
2351 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2352 __releases(&bp->phy_lock)
2353 __acquires(&bp->phy_lock)
2354 {
2355         u32 val;
2356         int rc = 0;
2357
2358         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2359         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2360
2361         bp->mii_bmcr = MII_BMCR;
2362         bp->mii_bmsr = MII_BMSR;
2363         bp->mii_bmsr1 = MII_BMSR;
2364         bp->mii_adv = MII_ADVERTISE;
2365         bp->mii_lpa = MII_LPA;
2366
2367         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368
2369         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2370                 goto setup_phy;
2371
2372         bnx2_read_phy(bp, MII_PHYSID1, &val);
2373         bp->phy_id = val << 16;
2374         bnx2_read_phy(bp, MII_PHYSID2, &val);
2375         bp->phy_id |= val & 0xffff;
2376
2377         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2378                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2379                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2380                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2381                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2382                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2383                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2384         }
2385         else {
2386                 rc = bnx2_init_copper_phy(bp, reset_phy);
2387         }
2388
2389 setup_phy:
2390         if (!rc)
2391                 rc = bnx2_setup_phy(bp, bp->phy_port);
2392
2393         return rc;
2394 }
2395
2396 static int
2397 bnx2_set_mac_loopback(struct bnx2 *bp)
2398 {
2399         u32 mac_mode;
2400
2401         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405         bp->link_up = 1;
2406         return 0;
2407 }
2408
2409 static int bnx2_test_link(struct bnx2 *);
2410
2411 static int
2412 bnx2_set_phy_loopback(struct bnx2 *bp)
2413 {
2414         u32 mac_mode;
2415         int rc, i;
2416
2417         spin_lock_bh(&bp->phy_lock);
2418         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2419                             BMCR_SPEED1000);
2420         spin_unlock_bh(&bp->phy_lock);
2421         if (rc)
2422                 return rc;
2423
2424         for (i = 0; i < 10; i++) {
2425                 if (bnx2_test_link(bp) == 0)
2426                         break;
2427                 msleep(100);
2428         }
2429
2430         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2432                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2433                       BNX2_EMAC_MODE_25G_MODE);
2434
2435         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2436         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2437         bp->link_up = 1;
2438         return 0;
2439 }
2440
/* Post a command to the bootcode mailbox (BNX2_DRV_MB) and, if @ack,
 * poll the firmware mailbox for acknowledgement.
 *
 * @msg_data: command/flags; the driver sequence number is OR'ed in.
 * @ack:      when zero, fire-and-forget and return 0 immediately.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success, -EBUSY when the firmware does not ack within
 * BNX2_FW_ACK_TIME_OUT_MS (after informing it of the timeout), or -EIO
 * when the firmware acks with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* NOTE(review): WAIT0 requests report success without checking
	 * the ack result below -- presumably intentional; confirm against
	 * the bootcode handshake protocol.
	 */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2486
/* Initialize 5709 context memory: start the on-chip MEM_INIT, then
 * program the host page table with the driver's context pages.
 * Returns 0 on success, -EBUSY when the chip does not respond in time,
 * -ENOMEM when a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* page-size encoding */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the chip to finish its internal memory init. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write one page-table entry: low DMA dword plus valid
		 * bit, high dword, then trigger the write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the chip clears WRITE_REQ. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2534
2535 static void
2536 bnx2_init_context(struct bnx2 *bp)
2537 {
2538         u32 vcid;
2539
2540         vcid = 96;
2541         while (vcid) {
2542                 u32 vcid_addr, pcid_addr, offset;
2543                 int i;
2544
2545                 vcid--;
2546
2547                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2548                         u32 new_vcid;
2549
2550                         vcid_addr = GET_PCID_ADDR(vcid);
2551                         if (vcid & 0x8) {
2552                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2553                         }
2554                         else {
2555                                 new_vcid = vcid;
2556                         }
2557                         pcid_addr = GET_PCID_ADDR(new_vcid);
2558                 }
2559                 else {
2560                         vcid_addr = GET_CID_ADDR(vcid);
2561                         pcid_addr = vcid_addr;
2562                 }
2563
2564                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2565                         vcid_addr += (i << PHY_CTX_SHIFT);
2566                         pcid_addr += (i << PHY_CTX_SHIFT);
2567
2568                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2569                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2570
2571                         /* Zero out the context. */
2572                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2573                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2574                 }
2575         }
2576 }
2577
2578 static int
2579 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2580 {
2581         u16 *good_mbuf;
2582         u32 good_mbuf_cnt;
2583         u32 val;
2584
2585         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2586         if (good_mbuf == NULL) {
2587                 printk(KERN_ERR PFX "Failed to allocate memory in "
2588                                     "bnx2_alloc_bad_rbuf\n");
2589                 return -ENOMEM;
2590         }
2591
2592         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2593                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2594
2595         good_mbuf_cnt = 0;
2596
2597         /* Allocate a bunch of mbufs and save the good ones in an array. */
2598         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2599         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2600                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2601                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2602
2603                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2604
2605                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2606
2607                 /* The addresses with Bit 9 set are bad memory blocks. */
2608                 if (!(val & (1 << 9))) {
2609                         good_mbuf[good_mbuf_cnt] = (u16) val;
2610                         good_mbuf_cnt++;
2611                 }
2612
2613                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2614         }
2615
2616         /* Free the good ones back to the mbuf pool thus discarding
2617          * all the bad ones. */
2618         while (good_mbuf_cnt) {
2619                 good_mbuf_cnt--;
2620
2621                 val = good_mbuf[good_mbuf_cnt];
2622                 val = (val << 9) | val | 1;
2623
2624                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2625         }
2626         kfree(good_mbuf);
2627         return 0;
2628 }
2629
2630 static void
2631 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2632 {
2633         u32 val;
2634
2635         val = (mac_addr[0] << 8) | mac_addr[1];
2636
2637         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2638
2639         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2640                 (mac_addr[4] << 8) | mac_addr[5];
2641
2642         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2643 }
2644
2645 static inline int
2646 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2647 {
2648         dma_addr_t mapping;
2649         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2650         struct rx_bd *rxbd =
2651                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2652         struct page *page = alloc_page(GFP_ATOMIC);
2653
2654         if (!page)
2655                 return -ENOMEM;
2656         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2657                                PCI_DMA_FROMDEVICE);
2658         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2659                 __free_page(page);
2660                 return -EIO;
2661         }
2662
2663         rx_pg->page = page;
2664         pci_unmap_addr_set(rx_pg, mapping, mapping);
2665         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2666         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2667         return 0;
2668 }
2669
/* Unmap and free the page attached to RX page-ring slot @index, if
 * any, and clear the slot.
 */
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}
2685
/* Allocate, align and DMA-map a fresh skb for RX ring slot @index and
 * attach it to the matching buffer descriptor.  Advances
 * rxr->rx_prod_bseq on success.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, or -EIO
 * if the DMA mapping fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Push skb->data up to the next BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the two BD halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2720
2721 static int
2722 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2723 {
2724         struct status_block *sblk = bnapi->status_blk.msi;
2725         u32 new_link_state, old_link_state;
2726         int is_set = 1;
2727
2728         new_link_state = sblk->status_attn_bits & event;
2729         old_link_state = sblk->status_attn_bits_ack & event;
2730         if (new_link_state != old_link_state) {
2731                 if (new_link_state)
2732                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2733                 else
2734                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2735         } else
2736                 is_set = 0;
2737
2738         return is_set;
2739 }
2740
/* Service PHY attention events (link state change, remote-link timer
 * abort) under the PHY lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2754
/* Read the hardware TX consumer index from the status block.  The
 * last descriptor of each ring page is a next-page pointer that the
 * hardware skips, so skip it in the returned count as well.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	/* Step over the unusable last entry of a ring page. */
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2768
/* Reclaim completed TX descriptors for this NAPI instance's ring, up
 * to @budget packets: unmap each packet's DMA buffers, free the skb,
 * and wake the associated netdev TX queue if it was stopped and enough
 * descriptors are free again.  Returns the number of packets freed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			/* Account for the skipped last entry when the
			 * packet's BDs cross a ring-page boundary.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's final BD has not completed
			 * yet; the signed difference handles index wrap.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Advance past this packet's fragment BDs. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index to pick up completions that
		 * arrived while we were processing.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the TX lock to close the race with the
		 * transmit path stopping the queue concurrently.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2851
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side after an allocation failure, so no ring slots are
 * lost.  When @skb is non-NULL, its last frag page is first detached
 * and returned to the ring, and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page and its DMA mapping from the consumer
		 * slot to the producer slot; a slot only moves when the
		 * two indices differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2907
/* Recycle an RX skb from consumer slot @cons to producer slot @prod
 * when a replacement buffer could not be allocated (or the packet was
 * copied out).  Syncs the header area back to the device and moves the
 * skb, DMA mapping and BD address from the consumer to the producer
 * descriptor.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area (which the CPU may have touched) back to
	 * the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2937
/* Finish receiving one packet into @skb.  For small packets (hdr_len
 * == 0) the whole frame is in the linear buffer; for split/jumbo
 * packets the remainder is gathered from the page ring into skb frags.
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.
 *
 * The "+ 4" adjustments throughout account for the trailing CRC that
 * the hardware appends to the frame.
 *
 * Returns 0 on success or a negative errno when a replacement buffer
 * could not be allocated (the packet is dropped and its buffers are
 * recycled).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill the slot: recycle the old skb (and
		 * any page-ring entries the packet would have used).
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* Only CRC bytes (or less) remain: trim them off
			 * the skb instead of attaching another frag, and
			 * recycle the unused page-ring entries.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3036
/* Read the hardware RX consumer index from the status block, stepping
 * over the unusable last descriptor of each ring page (it holds the
 * next-page pointer).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	/* Skip the next-page pointer entry. */
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3050
/* RX fast path: process up to @budget received packets for this NAPI
 * instance's RX ring.  Handles error frames, small-packet copy-break,
 * jumbo/split frames via the page ring, VLAN tag extraction and
 * checksum offload results, then hands packets to the stack.  Finally
 * publishes the new producer indices to the chip.  Returns the number
 * of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Pull the frame header into CPU view; the l2_fhdr the
		 * chip DMAs ahead of the frame carries length/status.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		/* Split frames and frames above the jumbo threshold use
		 * the page ring for the payload beyond hdr_len.
		 */
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling their buffers. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing CRC. */
		len -= 4;

		/* Copy-break: for small frames, copy into a fresh skb
		 * and recycle the ring buffer in place.
		 */
		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		/* VLAN tag: hand it to the acceleration layer when a
		 * vlan group is registered, otherwise re-insert the tag
		 * into the packet data.
		 */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-length frames that are not VLAN tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results when RX csum offload
		 * is enabled and the chip found no checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3226
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3249
/* MSI ISR for one-shot mode: the chip masks itself after raising the
 * interrupt, so no explicit mask write is needed here.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3266
/* INTx ISR.  Unlike the MSI handlers, the line may be shared, so first
 * check whether this device actually raised the interrupt before
 * masking and scheduling NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3305
3306 static inline int
3307 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3308 {
3309         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3310         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3311
3312         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3313             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3314                 return 1;
3315         return 0;
3316 }
3317
3318 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3319                                  STATUS_ATTN_BITS_TIMER_ABORT)
3320
/* Return 1 when any work is pending for this NAPI instance: TX/RX
 * ring completions, CNIC (offload) events, or unacknowledged slow-path
 * attention events.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* CNIC work is pending when its tag lags the status index. */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention bit differing from its ack bit means an
	 * unserviced link/timer event.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3340
/* Check for an apparently missed MSI: if work is pending and the status
 * index has not advanced since the previous idle check, toggle the MSI
 * enable bit and invoke the MSI handler directly to drain the backlog.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                /* Nothing to do unless MSI is actually enabled. */
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        /* Disable then re-enable MSI, and run the handler
                         * by hand for the work that was left pending.
                         */
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember where we were for the next idle check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3362
#ifdef BCM_CNIC
/* Hand the current status block to the registered CNIC driver, if any.
 * bp->cnic_ops is RCU-protected; cnic_tag records the status index the
 * CNIC handler has processed (compared in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3379
3380 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3381 {
3382         struct status_block *sblk = bnapi->status_blk.msi;
3383         u32 status_attn_bits = sblk->status_attn_bits;
3384         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3385
3386         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3387             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3388
3389                 bnx2_phy_int(bp, bnapi);
3390
3391                 /* This is needed to take care of transient status
3392                  * during link changes.
3393                  */
3394                 REG_WR(bp, BNX2_HC_COMMAND,
3395                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3396                 REG_RD(bp, BNX2_HC_COMMAND);
3397         }
3398 }
3399
3400 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3401                           int work_done, int budget)
3402 {
3403         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3404         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3405
3406         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3407                 bnx2_tx_int(bp, bnapi, 0);
3408
3409         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3410                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3411
3412         return work_done;
3413 }
3414
/* NAPI poll handler for an MSI-X vector: loop processing fast-path work
 * until the budget is exhausted or no more work is pending, then complete
 * NAPI and ack the interrupt with the latest status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        /* Ack this vector with the status index we have
                         * fully processed.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3441
/* NAPI poll handler for the INTx/MSI (single vector) case: handles link
 * attention, fast-path work and CNIC events, then completes NAPI and acks
 * the interrupt.  The INTx path needs a two-step ack (mask then unmask);
 * MSI/MSI-X needs only one write.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: a single ack write suffices. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: ack with the interrupt masked first, then
                         * again unmasked to re-enable it.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3490
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC RX mode, multicast hash filters and unicast match
 * filters from the net_device flags and address lists, then commit the
 * sort-mode register with its enable bit.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with promiscuous/keep-VLAN cleared;
         * they are re-derived below.
         */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address into one bit of the 256-bit filter:
                 * low CRC byte selects register (top 3 bits) and bit
                 * position (low 5 bits).
                 */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many secondary unicast addresses for the match filters:
         * fall back to promiscuous mode.
         */
        if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into to the match filter list */
                i = 0;
                list_for_each_entry(ha, &dev->uc.list, list) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Commit sort mode: clear, program, then set the enable bit. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3586
3587 static int __devinit
3588 check_fw_section(const struct firmware *fw,
3589                  const struct bnx2_fw_file_section *section,
3590                  u32 alignment, bool non_empty)
3591 {
3592         u32 offset = be32_to_cpu(section->offset);
3593         u32 len = be32_to_cpu(section->len);
3594
3595         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3596                 return -EINVAL;
3597         if ((non_empty && len == 0) || len > fw->size - offset ||
3598             len & (alignment - 1))
3599                 return -EINVAL;
3600         return 0;
3601 }
3602
3603 static int __devinit
3604 check_mips_fw_entry(const struct firmware *fw,
3605                     const struct bnx2_mips_fw_file_entry *entry)
3606 {
3607         if (check_fw_section(fw, &entry->text, 4, true) ||
3608             check_fw_section(fw, &entry->data, 4, false) ||
3609             check_fw_section(fw, &entry->rodata, 4, false))
3610                 return -EINVAL;
3611         return 0;
3612 }
3613
/* Request and sanity-check the MIPS and RV2P firmware images for this
 * chip generation.  The firmware handles are stored in bp and remain
 * held after this function returns.
 *
 * NOTE(review): on failure after the first request_firmware() succeeds,
 * the already-loaded image is not released here — presumably the
 * caller's error path releases it; verify.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        /* 5709 uses a different firmware set than 5706/5708. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
                       mips_fw_file);
                return rc;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
                       rv2p_fw_file);
                return rc;
        }
        /* Validate every section of both images before use. */
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
                       mips_fw_file);
                return -EINVAL;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
                       rv2p_fw_file);
                return -EINVAL;
        }

        return 0;
}
3665
3666 static u32
3667 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3668 {
3669         switch (idx) {
3670         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3671                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3672                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3673                 break;
3674         }
3675         return rv2p_code;
3676 }
3677
/* Download one RV2P firmware image into the selected RV2P processor.
 * Each 64-bit instruction is written through the INSTR_HIGH/INSTR_LOW
 * register pair and committed with an address/command write.  Fixup
 * entries then patch individual instructions in place.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Select the command/address registers for the target processor. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Write the code 8 bytes (one instruction) at a time. */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Apply fixups: each non-zero entry names a code word (in 32-bit
         * units) to rewrite via rv2p_fw_fixup().
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3737
3738 static int
3739 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3740             const struct bnx2_mips_fw_file_entry *fw_entry)
3741 {
3742         u32 addr, len, file_offset;
3743         __be32 *data;
3744         u32 offset;
3745         u32 val;
3746
3747         /* Halt the CPU. */
3748         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3749         val |= cpu_reg->mode_value_halt;
3750         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3751         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3752
3753         /* Load the Text area. */
3754         addr = be32_to_cpu(fw_entry->text.addr);
3755         len = be32_to_cpu(fw_entry->text.len);
3756         file_offset = be32_to_cpu(fw_entry->text.offset);
3757         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3758
3759         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3760         if (len) {
3761                 int j;
3762
3763                 for (j = 0; j < (len / 4); j++, offset += 4)
3764                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3765         }
3766
3767         /* Load the Data area. */
3768         addr = be32_to_cpu(fw_entry->data.addr);
3769         len = be32_to_cpu(fw_entry->data.len);
3770         file_offset = be32_to_cpu(fw_entry->data.offset);
3771         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3772
3773         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3774         if (len) {
3775                 int j;
3776
3777                 for (j = 0; j < (len / 4); j++, offset += 4)
3778                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3779         }
3780
3781         /* Load the Read-Only area. */
3782         addr = be32_to_cpu(fw_entry->rodata.addr);
3783         len = be32_to_cpu(fw_entry->rodata.len);
3784         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3785         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3786
3787         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3788         if (len) {
3789                 int j;
3790
3791                 for (j = 0; j < (len / 4); j++, offset += 4)
3792                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3793         }
3794
3795         /* Clear the pre-fetch instruction. */
3796         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3797
3798         val = be32_to_cpu(fw_entry->start_addr);
3799         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3800
3801         /* Start the CPU. */
3802         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3803         val &= ~cpu_reg->mode_value_halt;
3804         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3805         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3806
3807         return 0;
3808 }
3809
/* Load firmware into all on-chip processors: both RV2P processors first,
 * then the five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 * Returns the first load_cpu_fw() error, or 0 on success.  The
 * load_rv2p_fw() return values are ignored (it always returns 0).
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        const struct bnx2_mips_fw_file *mips_fw =
                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        const struct bnx2_rv2p_fw_file *rv2p_fw =
                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        int rc;

        /* Initialize the RV2P processor. */
        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

        /* Initialize the RX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
        return rc;
}
3849
/* Move the device between PCI power states D0 and D3hot.  For D3hot,
 * optionally configure Wake-on-LAN (low-speed autoneg, magic/ACPI packet
 * reception, all-multicast filters) before cutting power.  Returns 0 on
 * success or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power-state field and the PME status bit. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Clear any received wake-packet indications and disable
                 * magic-packet mode.
                 */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Renegotiate the link at low speed for WOL on
                         * copper, restoring the user settings afterwards.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the management firmware we are suspending. */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                /* NOTE(review): on 5706 A0/A1 without WOL the power-state
                 * field is left at D0 — presumably those steppings cannot
                 * enter D3hot without WOL armed; verify against errata.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3987
3988 static int
3989 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3990 {
3991         u32 val;
3992         int j;
3993
3994         /* Request access to the flash interface. */
3995         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3996         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3997                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3998                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3999                         break;
4000
4001                 udelay(5);
4002         }
4003
4004         if (j >= NVRAM_TIMEOUT_COUNT)
4005                 return -EBUSY;
4006
4007         return 0;
4008 }
4009
4010 static int
4011 bnx2_release_nvram_lock(struct bnx2 *bp)
4012 {
4013         int j;
4014         u32 val;
4015
4016         /* Relinquish nvram interface. */
4017         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4018
4019         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4020                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4021                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4022                         break;
4023
4024                 udelay(5);
4025         }
4026
4027         if (j >= NVRAM_TIMEOUT_COUNT)
4028                 return -EBUSY;
4029
4030         return 0;
4031 }
4032
4033
4034 static int
4035 bnx2_enable_nvram_write(struct bnx2 *bp)
4036 {
4037         u32 val;
4038
4039         val = REG_RD(bp, BNX2_MISC_CFG);
4040         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4041
4042         if (bp->flash_info->flags & BNX2_NV_WREN) {
4043                 int j;
4044
4045                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4046                 REG_WR(bp, BNX2_NVM_COMMAND,
4047                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4048
4049                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4050                         udelay(5);
4051
4052                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4053                         if (val & BNX2_NVM_COMMAND_DONE)
4054                                 break;
4055                 }
4056
4057                 if (j >= NVRAM_TIMEOUT_COUNT)
4058                         return -EBUSY;
4059         }
4060         return 0;
4061 }
4062
4063 static void
4064 bnx2_disable_nvram_write(struct bnx2 *bp)
4065 {
4066         u32 val;
4067
4068         val = REG_RD(bp, BNX2_MISC_CFG);
4069         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4070 }
4071
4072
4073 static void
4074 bnx2_enable_nvram_access(struct bnx2 *bp)
4075 {
4076         u32 val;
4077
4078         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4079         /* Enable both bits, even on read. */
4080         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4081                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4082 }
4083
4084 static void
4085 bnx2_disable_nvram_access(struct bnx2 *bp)
4086 {
4087         u32 val;
4088
4089         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4090         /* Disable both bits, even after read. */
4091         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4092                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4093                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4094 }
4095
/* Erase one flash page at 'offset'.  Buffered flash parts need no erase
 * and return immediately.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4135
/* Read one 32-bit word from NVRAM at 'offset' into ret_val (stored as
 * 4 big-endian bytes).  cmd_flags carries additional NVM command bits
 * (e.g. first/last for multi-word transfers).  Returns 0 on success or
 * -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4179
4180
/* Write one 32-bit word (@val, 4 bytes in flash byte order) to NVRAM at
 * @offset.  @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing
 * bits.  Callers must hold the NVRAM lock with access and writes
 * enabled.  Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Copy through a local so @val need not be 32-bit aligned. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4224
/* Identify the NVRAM (flash/EEPROM) part attached to the chip and point
 * bp->flash_info at the matching flash_spec.  On 5709 the spec is fixed
 * (flash_5709); on older chips the part is matched against flash_table
 * using the strapping bits in NVM_CFG1, and the flash interface is
 * reconfigured for the part if that has not been done yet.  Also
 * determines bp->flash_size.  Returns 0 on success or -ENODEV for an
 * unknown part.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 uses one fixed flash spec; no strap decoding. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the config1 value that was
			 * programmed at reconfiguration time. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap encoding is in use. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		/* Neither loop matched an entry. */
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported via shared memory; fall back to the
	 * table's total_size when firmware does not provide one. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4307
4308 static int
4309 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4310                 int buf_size)
4311 {
4312         int rc = 0;
4313         u32 cmd_flags, offset32, len32, extra;
4314
4315         if (buf_size == 0)
4316                 return 0;
4317
4318         /* Request access to the flash interface. */
4319         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4320                 return rc;
4321
4322         /* Enable access to flash interface */
4323         bnx2_enable_nvram_access(bp);
4324
4325         len32 = buf_size;
4326         offset32 = offset;
4327         extra = 0;
4328
4329         cmd_flags = 0;
4330
4331         if (offset32 & 3) {
4332                 u8 buf[4];
4333                 u32 pre_len;
4334
4335                 offset32 &= ~3;
4336                 pre_len = 4 - (offset & 3);
4337
4338                 if (pre_len >= len32) {
4339                         pre_len = len32;
4340                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4341                                     BNX2_NVM_COMMAND_LAST;
4342                 }
4343                 else {
4344                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4345                 }
4346
4347                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4348
4349                 if (rc)
4350                         return rc;
4351
4352                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4353
4354                 offset32 += 4;
4355                 ret_buf += pre_len;
4356                 len32 -= pre_len;
4357         }
4358         if (len32 & 3) {
4359                 extra = 4 - (len32 & 3);
4360                 len32 = (len32 + 4) & ~3;
4361         }
4362
4363         if (len32 == 4) {
4364                 u8 buf[4];
4365
4366                 if (cmd_flags)
4367                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4368                 else
4369                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4370                                     BNX2_NVM_COMMAND_LAST;
4371
4372                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4373
4374                 memcpy(ret_buf, buf, 4 - extra);
4375         }
4376         else if (len32 > 0) {
4377                 u8 buf[4];
4378
4379                 /* Read the first word. */
4380                 if (cmd_flags)
4381                         cmd_flags = 0;
4382                 else
4383                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4384
4385                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4386
4387                 /* Advance to the next dword. */
4388                 offset32 += 4;
4389                 ret_buf += 4;
4390                 len32 -= 4;
4391
4392                 while (len32 > 4 && rc == 0) {
4393                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4394
4395                         /* Advance to the next dword. */
4396                         offset32 += 4;
4397                         ret_buf += 4;
4398                         len32 -= 4;
4399                 }
4400
4401                 if (rc)
4402                         return rc;
4403
4404                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4405                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4406
4407                 memcpy(ret_buf, buf, 4 - extra);
4408         }
4409
4410         /* Disable access to flash interface */
4411         bnx2_disable_nvram_access(bp);
4412
4413         bnx2_release_nvram_lock(bp);
4414
4415         return rc;
4416 }
4417
/* Write @buf_size bytes from @data_buf to NVRAM at byte offset @offset.
 * Handles arbitrary (unaligned) offsets and lengths by first reading
 * back the partial dwords at either end, and performs a full
 * read/erase/write-back cycle per page for non-buffered flash parts.
 * The NVRAM lock is acquired and released once per page.  Returns 0 on
 * success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: round down and read the leading dword so the
	 * bytes before @offset can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: read the trailing dword likewise. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a dword-aligned copy of the data, padded at the
		 * ends with the bytes just read back from flash. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer holding one whole flash page for the
		 * read-modify-write cycle.  NOTE(review): 264 is
		 * presumably the largest page_size in flash_table —
		 * confirm against the table before changing. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4597
/* Read the firmware capability mailbox and record which optional
 * features the firmware supports (keeping VLAN tags, remote PHY),
 * updating bp->flags, bp->phy_flags and bp->phy_port.  If the
 * interface is up, the accepted capabilities are acknowledged back to
 * the firmware through the DRV_ACK_CAP mailbox.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF the driver can always keep VLAN tags. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	/* No valid signature: firmware provides no capability info. */
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Derive the current port type from the link status. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4637
/* Switch the GRC windows into separate-window mode and point windows 2
 * and 3 at the chip's MSI-X vector table and pending-bit array so the
 * host can access them.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4646
/* Perform a coordinated soft reset of the chip.  Quiesces DMA and host
 * coalescing, handshakes with the bootcode firmware before and after
 * the reset (@reset_code tells the firmware why), issues the
 * chip-specific reset, verifies the endian configuration, refreshes
 * the firmware capabilities, and re-applies per-chip fixups (5706 A0
 * voltage regulator and bad-rbuf workaround, MSI-X table setup).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register rather than
		 * PCI config space. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* Re-apply remote-link defaults if the port type changed. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4752
/* Post-reset hardware initialization: DMA configuration, context
 * memory, on-chip CPU firmware, MAC address, MTU, status/statistics
 * block DMA addresses, host coalescing parameters (including the extra
 * MSI-X vectors), and the receive filter.  Finishes with the WAIT2
 * firmware handshake and enables the chip.  Returns 0 on success or a
 * negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA_CONFIG tuning bits — confirm
	 * against the chip spec before changing. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the relaxed-ordering (ERO) bit on PCI-X. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF sizing never drops below the standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear all status blocks and reset per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Coalescing parameters: the *_int variants occupy the high 16
	 * bits of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Coalescing setup for the additional MSI-X status blocks. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4977
4978 static void
4979 bnx2_clear_ring_states(struct bnx2 *bp)
4980 {
4981         struct bnx2_napi *bnapi;
4982         struct bnx2_tx_ring_info *txr;
4983         struct bnx2_rx_ring_info *rxr;
4984         int i;
4985
4986         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4987                 bnapi = &bp->bnx2_napi[i];
4988                 txr = &bnapi->tx_ring;
4989                 rxr = &bnapi->rx_ring;
4990
4991                 txr->tx_cons = 0;
4992                 txr->hw_tx_cons = 0;
4993                 rxr->rx_prod_bseq = 0;
4994                 rxr->rx_prod = 0;
4995                 rxr->rx_cons = 0;
4996                 rxr->rx_pg_prod = 0;
4997                 rxr->rx_pg_cons = 0;
4998         }
4999 }
5000
5001 static void
5002 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5003 {
5004         u32 val, offset0, offset1, offset2, offset3;
5005         u32 cid_addr = GET_CID_ADDR(cid);
5006
5007         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5008                 offset0 = BNX2_L2CTX_TYPE_XI;
5009                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5010                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5011                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5012         } else {
5013                 offset0 = BNX2_L2CTX_TYPE;
5014                 offset1 = BNX2_L2CTX_CMD_TYPE;
5015                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5016                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5017         }
5018         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5019         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5020
5021         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5022         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5023
5024         val = (u64) txr->tx_desc_mapping >> 32;
5025         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5026
5027         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5028         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5029 }
5030
5031 static void
5032 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5033 {
5034         struct tx_bd *txbd;
5035         u32 cid = TX_CID;
5036         struct bnx2_napi *bnapi;
5037         struct bnx2_tx_ring_info *txr;
5038
5039         bnapi = &bp->bnx2_napi[ring_num];
5040         txr = &bnapi->tx_ring;
5041
5042         if (ring_num == 0)
5043                 cid = TX_CID;
5044         else
5045                 cid = TX_TSS_CID + ring_num - 1;
5046
5047         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5048
5049         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5050
5051         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5052         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5053
5054         txr->tx_prod = 0;
5055         txr->tx_prod_bseq = 0;
5056
5057         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5058         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5059
5060         bnx2_init_tx_context(bp, cid, txr);
5061 }
5062
5063 static void
5064 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5065                      int num_rings)
5066 {
5067         int i;
5068         struct rx_bd *rxbd;
5069
5070         for (i = 0; i < num_rings; i++) {
5071                 int j;
5072
5073                 rxbd = &rx_ring[i][0];
5074                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5075                         rxbd->rx_bd_len = buf_size;
5076                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5077                 }
5078                 if (i == (num_rings - 1))
5079                         j = 0;
5080                 else
5081                         j = i + 1;
5082                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5083                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5084         }
5085 }
5086
/* Initialize RX ring @ring_num: build the descriptor chains, program the
 * RX (and optional page/jumbo) context, pre-fill the rings with buffers,
 * and write the initial producer indices to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffer size 0 disables the page ring unless set below. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo path: set up the separate page-descriptor ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Tell the chip where the first page of the normal RX ring lives. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (best effort) on alloc failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with skbs, likewise best effort. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Fast-path mailbox addresses for producer index/bseq updates. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5166
/* Initialize every TX and RX ring and, when more than one ring of a
 * kind is in use, enable TSS on the transmit side and RSS (with its
 * indirection table) on the receive side.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Multiple TX rings: enable TSS with the extra ring count and
	 * the base TSS CID.
	 */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the RSS indirection table: one byte per entry,
		 * cycling over the non-default RX rings, packed four
		 * entries per 32-bit indirect write.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5211
5212 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5213 {
5214         u32 max, num_rings = 1;
5215
5216         while (ring_size > MAX_RX_DESC_CNT) {
5217                 ring_size -= MAX_RX_DESC_CNT;
5218                 num_rings++;
5219         }
5220         /* round to next power of 2 */
5221         max = max_size;
5222         while ((max & num_rings) == 0)
5223                 max >>= 1;
5224
5225         if (num_rings != max)
5226                 max <<= 1;
5227
5228         return max;
5229 }
5230
/* Derive all RX buffer/ring sizing from the current MTU and the
 * requested ring @size: normal buffer size, copy-break threshold,
 * jumbo (page) ring size, and the ring counts/index masks.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint, including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame doesn't fit one page: overflow goes to the page
		 * ring, possibly several pages per packet.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In jumbo mode the first buffer only holds the header
		 * portion, and copy-break is disabled.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5269
5270 static void
5271 bnx2_free_tx_skbs(struct bnx2 *bp)
5272 {
5273         int i;
5274
5275         for (i = 0; i < bp->num_tx_rings; i++) {
5276                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5277                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5278                 int j;
5279
5280                 if (txr->tx_buf_ring == NULL)
5281                         continue;
5282
5283                 for (j = 0; j < TX_DESC_CNT; ) {
5284                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5285                         struct sk_buff *skb = tx_buf->skb;
5286
5287                         if (skb == NULL) {
5288                                 j++;
5289                                 continue;
5290                         }
5291
5292                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5293
5294                         tx_buf->skb = NULL;
5295
5296                         j += skb_shinfo(skb)->nr_frags + 1;
5297                         dev_kfree_skb(skb);
5298                 }
5299         }
5300 }
5301
5302 static void
5303 bnx2_free_rx_skbs(struct bnx2 *bp)
5304 {
5305         int i;
5306
5307         for (i = 0; i < bp->num_rx_rings; i++) {
5308                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5309                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5310                 int j;
5311
5312                 if (rxr->rx_buf_ring == NULL)
5313                         return;
5314
5315                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5316                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5317                         struct sk_buff *skb = rx_buf->skb;
5318
5319                         if (skb == NULL)
5320                                 continue;
5321
5322                         pci_unmap_single(bp->pdev,
5323                                          pci_unmap_addr(rx_buf, mapping),
5324                                          bp->rx_buf_use_size,
5325                                          PCI_DMA_FROMDEVICE);
5326
5327                         rx_buf->skb = NULL;
5328
5329                         dev_kfree_skb(skb);
5330                 }
5331                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5332                         bnx2_free_rx_page(bp, rxr, j);
5333         }
5334 }
5335
/* Free all TX and RX buffers still owned by the driver (reset/teardown). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5342
5343 static int
5344 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5345 {
5346         int rc;
5347
5348         rc = bnx2_reset_chip(bp, reset_code);
5349         bnx2_free_skbs(bp);
5350         if (rc)
5351                 return rc;
5352
5353         if ((rc = bnx2_init_chip(bp)) != 0)
5354                 return rc;
5355
5356         bnx2_init_all_rings(bp);
5357         return 0;
5358 }
5359
5360 static int
5361 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5362 {
5363         int rc;
5364
5365         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5366                 return rc;
5367
5368         spin_lock_bh(&bp->phy_lock);
5369         bnx2_init_phy(bp, reset_phy);
5370         bnx2_set_link(bp);
5371         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5372                 bnx2_remote_phy_event(bp);
5373         spin_unlock_bh(&bp->phy_lock);
5374         return 0;
5375 }
5376
5377 static int
5378 bnx2_shutdown_chip(struct bnx2 *bp)
5379 {
5380         u32 reset_code;
5381
5382         if (bp->flags & BNX2_FLAG_NO_WOL)
5383                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5384         else if (bp->wol)
5385                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5386         else
5387                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5388
5389         return bnx2_reset_chip(bp, reset_code);
5390 }
5391
/* Self-test: exercise a table of chip registers.  For each entry,
 * rw_mask bits must accept writes of 0 and 0xffffffff, and ro_mask
 * bits must be preserved across both writes.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  The original register
 * value is restored in all cases.  Returns 0 on success, -ENODEV on
 * the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;	/* bits that must be writable */
		u32   ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* offset 0xffff terminates the table */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		/* After writing 0, writable bits must read back 0 ... */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ... and read-only bits must be unchanged. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		/* After writing all-ones, writable bits must read back 1 */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value and move on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5562
5563 static int
5564 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5565 {
5566         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5567                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5568         int i;
5569
5570         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5571                 u32 offset;
5572
5573                 for (offset = 0; offset < size; offset += 4) {
5574
5575                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5576
5577                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5578                                 test_pattern[i]) {
5579                                 return -ENODEV;
5580                         }
5581                 }
5582         }
5583         return 0;
5584 }
5585
5586 static int
5587 bnx2_test_memory(struct bnx2 *bp)
5588 {
5589         int ret = 0;
5590         int i;
5591         static struct mem_entry {
5592                 u32   offset;
5593                 u32   len;
5594         } mem_tbl_5706[] = {
5595                 { 0x60000,  0x4000 },
5596                 { 0xa0000,  0x3000 },
5597                 { 0xe0000,  0x4000 },
5598                 { 0x120000, 0x4000 },
5599                 { 0x1a0000, 0x4000 },
5600                 { 0x160000, 0x4000 },
5601                 { 0xffffffff, 0    },
5602         },
5603         mem_tbl_5709[] = {
5604                 { 0x60000,  0x4000 },
5605                 { 0xa0000,  0x3000 },
5606                 { 0xe0000,  0x4000 },
5607                 { 0x120000, 0x4000 },
5608                 { 0x1a0000, 0x4000 },
5609                 { 0xffffffff, 0    },
5610         };
5611         struct mem_entry *mem_tbl;
5612
5613         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5614                 mem_tbl = mem_tbl_5709;
5615         else
5616                 mem_tbl = mem_tbl_5706;
5617
5618         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5619                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5620                         mem_tbl[i].len)) != 0) {
5621                         return ret;
5622                 }
5623         }
5624
5625         return ret;
5626 }
5627
5628 #define BNX2_MAC_LOOPBACK       0
5629 #define BNX2_PHY_LOOPBACK       1
5630
/* Self-test: transmit one packet through an internal MAC or PHY
 * loopback and verify that it arrives back intact on the RX ring.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * setup failure, or -ENODEV when the looped packet is missing or
 * corrupt.  (PHY loopback is skipped — returns 0 — on remote-PHY
 * configurations.)
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our MAC as destination, zeroed src/type
	 * fields, then an incrementing byte pattern as payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a status block update to capture a stable starting RX
	 * consumer index before sending.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Queue the single packet on the TX ring. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell (producer index + byte sequence). */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2_fhdr precedes the frame in the buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as errored. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus 4-byte CRC) and payload must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5761
5762 #define BNX2_MAC_LOOPBACK_FAILED        1
5763 #define BNX2_PHY_LOOPBACK_FAILED        2
5764 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5765                                          BNX2_PHY_LOOPBACK_FAILED)
5766
5767 static int
5768 bnx2_test_loopback(struct bnx2 *bp)
5769 {
5770         int rc = 0;
5771
5772         if (!netif_running(bp->dev))
5773                 return BNX2_LOOPBACK_FAILED;
5774
5775         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5776         spin_lock_bh(&bp->phy_lock);
5777         bnx2_init_phy(bp, 1);
5778         spin_unlock_bh(&bp->phy_lock);
5779         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5780                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5781         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5782                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5783         return rc;
5784 }
5785
5786 #define NVRAM_SIZE 0x200
5787 #define CRC32_RESIDUAL 0xdebb20e3
5788
5789 static int
5790 bnx2_test_nvram(struct bnx2 *bp)
5791 {
5792         __be32 buf[NVRAM_SIZE / 4];
5793         u8 *data = (u8 *) buf;
5794         int rc = 0;
5795         u32 magic, csum;
5796
5797         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5798                 goto test_nvram_done;
5799
5800         magic = be32_to_cpu(buf[0]);
5801         if (magic != 0x669955aa) {
5802                 rc = -ENODEV;
5803                 goto test_nvram_done;
5804         }
5805
5806         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5807                 goto test_nvram_done;
5808
5809         csum = ether_crc_le(0x100, data);
5810         if (csum != CRC32_RESIDUAL) {
5811                 rc = -ENODEV;
5812                 goto test_nvram_done;
5813         }
5814
5815         csum = ether_crc_le(0x100, data + 0x100);
5816         if (csum != CRC32_RESIDUAL) {
5817                 rc = -ENODEV;
5818         }
5819
5820 test_nvram_done:
5821         return rc;
5822 }
5823
/* ethtool link self-test: returns 0 when link is up, -ENODEV when the
 * device is down or no link is detected.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* With a remote PHY we only have the cached link state. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* BMSR status bits are latched; read twice so the second read
	 * reflects the current state rather than a stale latched value.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5849
5850 static int
5851 bnx2_test_intr(struct bnx2 *bp)
5852 {
5853         int i;
5854         u16 status_idx;
5855
5856         if (!netif_running(bp->dev))
5857                 return -ENODEV;
5858
5859         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5860
5861         /* This register is not touched during run-time. */
5862         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5863         REG_RD(bp, BNX2_HC_COMMAND);
5864
5865         for (i = 0; i < 10; i++) {
5866                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5867                         status_idx) {
5868
5869                         break;
5870                 }
5871
5872                 msleep_interruptible(10);
5873         }
5874         if (i < 10)
5875                 return 0;
5876
5877         return -ENODEV;
5878 }
5879
/* Determining link for parallel detection. */
/* Returns 1 when the 5706 SerDes sees a partner suitable for parallel
 * detect (signal present, in sync, and not receiving CONFIG codewords),
 * 0 otherwise.  Caller holds phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection can be disabled per-board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register and test signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* AN debug bits are latched; read twice for the current state. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Double-read of the DSP expansion register, same latching idea. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5911
/* Per-tick link maintenance for the 5706 SerDes: runs parallel
 * detection when autoneg has not produced link, reverts to autoneg when
 * appropriate, and forces the link down on loss of sync.  Runs in timer
 * (softirq) context; takes phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still waiting out a pending autoneg attempt. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not linked; if the partner looks
			 * usable, force 1G full duplex (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): shadow register 0x15 bit 0x20 presumably
		 * indicates the partner has started autonegotiating —
		 * confirm against the PHY datasheet.  If set, go back to
		 * autoneg mode.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* AN debug bits are latched; read twice for live state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Lost sync: force link down first; on the next
			 * tick let bnx2_set_link() re-evaluate.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5973
/* Per-tick link maintenance for the 5708 SerDes: while autoneg fails to
 * bring the link up on a 2.5G-capable port, alternate between forced
 * 2.5G and autoneg modes.  Runs in timer (softirq) context.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* The PHY is owned by management firmware; nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not linked; try forced 2.5G next. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; return to
			 * autoneg and give it two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6006
/* Periodic driver timer: missed-MSI check, firmware heartbeat, stats
 * workaround, and SerDes link maintenance.  Re-arms itself using
 * bp->current_interval (which the serdes timers may adjust).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the body while interrupts are held off (e.g. during reset);
	 * still re-arm the timer.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* The missed-MSI check only applies to non-one-shot MSI mode. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Pull the firmware RX-drop counter into the stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6042
6043 static int
6044 bnx2_request_irq(struct bnx2 *bp)
6045 {
6046         unsigned long flags;
6047         struct bnx2_irq *irq;
6048         int rc = 0, i;
6049
6050         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6051                 flags = 0;
6052         else
6053                 flags = IRQF_SHARED;
6054
6055         for (i = 0; i < bp->irq_nvecs; i++) {
6056                 irq = &bp->irq_tbl[i];
6057                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6058                                  &bp->bnx2_napi[i]);
6059                 if (rc)
6060                         break;
6061                 irq->requested = 1;
6062         }
6063         return rc;
6064 }
6065
6066 static void
6067 bnx2_free_irq(struct bnx2 *bp)
6068 {
6069         struct bnx2_irq *irq;
6070         int i;
6071
6072         for (i = 0; i < bp->irq_nvecs; i++) {
6073                 irq = &bp->irq_tbl[i];
6074                 if (irq->requested)
6075                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6076                 irq->requested = 0;
6077         }
6078         if (bp->flags & BNX2_FLAG_USING_MSI)
6079                 pci_disable_msi(bp->pdev);
6080         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6081                 pci_disable_msix(bp->pdev);
6082
6083         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6084 }
6085
/* Try to switch the device to MSI-X.  On success this sets irq_nvecs,
 * the USING_MSIX/ONE_SHOT_MSI flags and fills irq_tbl; on failure it
 * returns silently and the caller stays on MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the chip's MSI-X table/PBA locations (GRC windows)
	 * before enabling MSI-X at the PCI level.
	 */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All-or-nothing: request the full hardware vector count. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	/* Only msix_vecs vectors will actually be used, but the whole
	 * table is named and wired to the one-shot handler.
	 */
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6116
/* Select the interrupt mode (MSI-X > MSI > INTx) and initialize
 * irq_tbl plus the TX/RX ring counts accordingly.  @dis_msi forces
 * plain INTx (used after a failed MSI self-test).
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default to a single INTx vector; upgraded below if possible. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* MSI is only tried when MSI-X did not take over. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 gets the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX queue count must be a power of two for queue mapping. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6150
/* Called with rtnl_lock */
/* net_device open hook: power up the chip, choose an interrupt mode,
 * allocate rings, request IRQs, init the NIC and start the timer.  If
 * the MSI self-test fails, falls back to INTx and re-initializes.
 * Returns 0 on success or a negative errno after full unwind.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Pick MSI-X/MSI/INTx first; ring allocation depends on the
	 * resulting vector count.
	 */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces INTx mode for the retry. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything acquired above; these are safe to call even
	 * when only partially set up.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6227
/* Workqueue handler (scheduled by bnx2_tx_timeout()): stop traffic,
 * re-initialize the chip, and restart the interface.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* NOTE(review): intr_sem is raised before restarting — presumably
	 * to hold off interrupt handling until bnx2_netif_start() has
	 * re-enabled everything; confirm against bnx2_netif_start().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
6243
/* net_device tx_timeout hook: defer the chip reset to process context
 * via the reset_task work item (we cannot reset from here).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6252
6253 #ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and, if the device
 * is running, reprogram RX filtering around a netif stop/start.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;

	/* Device down: just remember the group for the next open. */
	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	/* Tell the firmware whether to keep VLAN tags on RX. */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
6274 #endif
6275
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue should have been stopped before the ring filled;
	 * reaching this path indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag goes in the upper 16 bits of the BD flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		/* LSO frame: encode MSS and TCP option length so the
		 * hardware can segment it.
		 */
		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* A non-zero IPv6 extension header offset
				 * is scattered across several BD fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map head + fragments for DMA; on failure just drop the skb. */
	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_head;

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD covers the linear part of the skb. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: publish the new producer index and byte
	 * sequence to the chip.
	 */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	/* Stop the queue when nearly full; re-wake immediately if a
	 * concurrent TX completion already freed enough descriptors.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6416
/* Called with rtnl_lock */
/* net_device stop hook: quiesce the reset task, interrupts, NAPI and
 * the timer; shut down the chip; free resources; power down.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure a pending reset_task cannot race with teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Leave the chip in low-power state until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6437
/* Read a 64-bit statistics counter composed of <name>_hi / <name>_lo
 * halves.  The whole expansion is parenthesized so the macro is safe
 * inside larger expressions (the previous form ended in an
 * unparenthesized `+`, so e.g. `2 * GET_NET_STATS64(x)` multiplied
 * only the high half).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low half fits in unsigned long. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6450
/* net_device get_stats hook: translate the firmware/hardware statistics
 * block into struct net_device_stats.  64-bit counters are folded via
 * GET_NET_STATS (full value on 64-bit hosts, low half on 32-bit).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	/* Stats memory may not exist yet (device never opened). */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) (stats_blk->stat_IfInFTQDiscards +
		stats_blk->stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual error categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are zeroed on 5706 and 5708 A0 —
	 * presumably the counter is unreliable there; confirm against
	 * chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInFTQDiscards +
		stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);

	return net_stats;
}
6527
6528 /* All ethtool functions called with rtnl_lock */
6529
/* ethtool get_settings: report supported/advertised modes and the
 * current speed/duplex (valid only while the link is up).  Always
 * returns 0.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote PHY can be switched between fibre and copper. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link fields against the timer/irq path. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Speed/duplex are unknown while the link is down. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6588
/* ethtool set_settings: validate the requested port/autoneg/speed
 * combination, store it, and reprogram the PHY if the device is up.
 * Returns 0 on success, -EINVAL on any invalid combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; commit only after validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable chip and a fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single speed selected: advertise everything
			 * the chosen port supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6683
6684 static void
6685 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6686 {
6687         struct bnx2 *bp = netdev_priv(dev);
6688
6689         strcpy(info->driver, DRV_MODULE_NAME);
6690         strcpy(info->version, DRV_MODULE_VERSION);
6691         strcpy(info->bus_info, pci_name(bp->pdev));
6692         strcpy(info->fw_version, bp->fw_version);
6693 }
6694
6695 #define BNX2_REGDUMP_LEN                (32 * 1024)
6696
/* ethtool get_regs_len: size of the buffer bnx2_get_regs() fills. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6702
6703 static void
6704 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6705 {
6706         u32 *p = _p, i, offset;
6707         u8 *orig_p = _p;
6708         struct bnx2 *bp = netdev_priv(dev);
6709         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6710                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6711                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6712                                  0x1040, 0x1048, 0x1080, 0x10a4,
6713                                  0x1400, 0x1490, 0x1498, 0x14f0,
6714                                  0x1500, 0x155c, 0x1580, 0x15dc,
6715                                  0x1600, 0x1658, 0x1680, 0x16d8,
6716                                  0x1800, 0x1820, 0x1840, 0x1854,
6717                                  0x1880, 0x1894, 0x1900, 0x1984,
6718                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6719                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6720                                  0x2000, 0x2030, 0x23c0, 0x2400,
6721                                  0x2800, 0x2820, 0x2830, 0x2850,
6722                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6723                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6724                                  0x4080, 0x4090, 0x43c0, 0x4458,
6725                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6726                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6727                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6728                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6729                                  0x6800, 0x6848, 0x684c, 0x6860,
6730                                  0x6888, 0x6910, 0x8000 };
6731
6732         regs->version = 0;
6733
6734         memset(p, 0, BNX2_REGDUMP_LEN);
6735
6736         if (!netif_running(bp->dev))
6737                 return;
6738
6739         i = 0;
6740         offset = reg_boundaries[0];
6741         p += offset;
6742         while (offset < BNX2_REGDUMP_LEN) {
6743                 *p++ = REG_RD(bp, offset);
6744                 offset += 4;
6745                 if (offset == reg_boundaries[i + 1]) {
6746                         offset = reg_boundaries[i + 2];
6747                         p = (u32 *) (orig_p + offset);
6748                         i += 2;
6749                 }
6750         }
6751 }
6752
6753 static void
6754 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6755 {
6756         struct bnx2 *bp = netdev_priv(dev);
6757
6758         if (bp->flags & BNX2_FLAG_NO_WOL) {
6759                 wol->supported = 0;
6760                 wol->wolopts = 0;
6761         }
6762         else {
6763                 wol->supported = WAKE_MAGIC;
6764                 if (bp->wol)
6765                         wol->wolopts = WAKE_MAGIC;
6766                 else
6767                         wol->wolopts = 0;
6768         }
6769         memset(&wol->sopass, 0, sizeof(wol->sopass));
6770 }
6771
6772 static int
6773 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6774 {
6775         struct bnx2 *bp = netdev_priv(dev);
6776
6777         if (wol->wolopts & ~WAKE_MAGIC)
6778                 return -EINVAL;
6779
6780         if (wol->wolopts & WAKE_MAGIC) {
6781                 if (bp->flags & BNX2_FLAG_NO_WOL)
6782                         return -EINVAL;
6783
6784                 bp->wol = 1;
6785         }
6786         else {
6787                 bp->wol = 0;
6788         }
6789         return 0;
6790 }
6791
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled; remote-PHY configurations delegate the restart
 * to the firmware.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		/* PHY is managed by firmware; ask it to renegotiate. */
		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop phy_lock across the sleep so others can run;
		 * reacquired before touching driver state again.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the SerDes autoneg timeout handling. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6837
/* ethtool get_link: return the cached link state maintained by the
 * link-change handlers.
 */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
6845
6846 static int
6847 bnx2_get_eeprom_len(struct net_device *dev)
6848 {
6849         struct bnx2 *bp = netdev_priv(dev);
6850
6851         if (bp->flash_info == NULL)
6852                 return 0;
6853
6854         return (int) bp->flash_size;
6855 }
6856
6857 static int
6858 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6859                 u8 *eebuf)
6860 {
6861         struct bnx2 *bp = netdev_priv(dev);
6862         int rc;
6863
6864         if (!netif_running(dev))
6865                 return -EAGAIN;
6866
6867         /* parameters already validated in ethtool_get_eeprom */
6868
6869         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6870
6871         return rc;
6872 }
6873
6874 static int
6875 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6876                 u8 *eebuf)
6877 {
6878         struct bnx2 *bp = netdev_priv(dev);
6879         int rc;
6880
6881         if (!netif_running(dev))
6882                 return -EAGAIN;
6883
6884         /* parameters already validated in ethtool_set_eeprom */
6885
6886         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6887
6888         return rc;
6889 }
6890
6891 static int
6892 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6893 {
6894         struct bnx2 *bp = netdev_priv(dev);
6895
6896         memset(coal, 0, sizeof(struct ethtool_coalesce));
6897
6898         coal->rx_coalesce_usecs = bp->rx_ticks;
6899         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6900         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6901         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6902
6903         coal->tx_coalesce_usecs = bp->tx_ticks;
6904         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6905         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6906         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6907
6908         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6909
6910         return 0;
6911 }
6912
/* ethtool set_coalesce: clamp and store interrupt coalescing
 * parameters, then restart the NIC so the new values take effect.
 *
 * NOTE(review): each value is truncated to u16 *before* being clamped,
 * so a request of e.g. 0x10000 wraps to 0 rather than saturating to
 * the maximum — presumably long-standing behavior; confirm before
 * changing.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Chips with the broken-stats errata only support 0 or 1s. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart the NIC so the host coalescing block is reprogrammed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6961
6962 static void
6963 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6964 {
6965         struct bnx2 *bp = netdev_priv(dev);
6966
6967         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6968         ering->rx_mini_max_pending = 0;
6969         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6970
6971         ering->rx_pending = bp->rx_ring_size;
6972         ering->rx_mini_pending = 0;
6973         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6974
6975         ering->tx_max_pending = MAX_TX_DESC_CNT;
6976         ering->tx_pending = bp->tx_ring_size;
6977 }
6978
/* Resize the rx/tx rings.  If the device is up it is fully quiesced,
 * all buffers and ring memory are released, memory is reallocated at
 * the new sizes and the NIC is re-initialized.  On allocation or init
 * failure the device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Stop traffic and drop all existing buffers first. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI so dev_close() can proceed. */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
		bnx2_netif_start(bp);
	}
	return 0;
}
7008
7009 static int
7010 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7011 {
7012         struct bnx2 *bp = netdev_priv(dev);
7013         int rc;
7014
7015         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7016                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7017                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7018
7019                 return -EINVAL;
7020         }
7021         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7022         return rc;
7023 }
7024
7025 static void
7026 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7027 {
7028         struct bnx2 *bp = netdev_priv(dev);
7029
7030         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7031         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7032         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7033 }
7034
7035 static int
7036 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7037 {
7038         struct bnx2 *bp = netdev_priv(dev);
7039
7040         bp->req_flow_ctrl = 0;
7041         if (epause->rx_pause)
7042                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7043         if (epause->tx_pause)
7044                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7045
7046         if (epause->autoneg) {
7047                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7048         }
7049         else {
7050                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7051         }
7052
7053         if (netif_running(dev)) {
7054                 spin_lock_bh(&bp->phy_lock);
7055                 bnx2_setup_phy(bp, bp->phy_port);
7056                 spin_unlock_bh(&bp->phy_lock);
7057         }
7058
7059         return 0;
7060 }
7061
/* ethtool get_rx_csum: report whether rx checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
7069
/* ethtool set_rx_csum: store the rx checksum offload setting; it is
 * consulted on the receive path.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
7078
7079 static int
7080 bnx2_set_tso(struct net_device *dev, u32 data)
7081 {
7082         struct bnx2 *bp = netdev_priv(dev);
7083
7084         if (data) {
7085                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7086                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7087                         dev->features |= NETIF_F_TSO6;
7088         } else
7089                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7090                                    NETIF_F_TSO_ECN);
7091         return 0;
7092 }
7093
/* ethtool statistics names.  Order must stay in sync with
 * bnx2_stats_offset_arr and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7145
/* Number of ethtool statistics; must equal the entry count of
 * bnx2_stats_offset_arr and the length arrays below.
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Word (u32) offset of a counter within the statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7150
/* Word offsets into the hardware statistics block, one per entry of
 * bnx2_stats_str_arr and in the same order.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7200
/* Byte width (8, 4 or 0) of each counter in bnx2_stats_offset_arr for
 * 5706 A0-A2 and 5708 A0 silicon.  stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped (width 0) because of
 * errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7211
/* Byte width of each counter for all other chip revisions; only
 * stat_IfHCInBadOctets is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7219
7220 #define BNX2_NUM_TESTS 6
7221
/* Self-test names, in the order bnx2_self_test() fills its result
 * buffer (buf[0]..buf[5]).
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7232
7233 static int
7234 bnx2_get_sset_count(struct net_device *dev, int sset)
7235 {
7236         switch (sset) {
7237         case ETH_SS_TEST:
7238                 return BNX2_NUM_TESTS;
7239         case ETH_SS_STATS:
7240                 return BNX2_NUM_STATS;
7241         default:
7242                 return -EOPNOTSUPP;
7243         }
7244 }
7245
/* ethtool self_test handler.  buf[0..5] correspond to the entries of
 * bnx2_tests_str_arr.  Offline tests reset the chip, so traffic is
 * stopped first and the NIC re-initialized afterwards.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if closed). */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7304
7305 static void
7306 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7307 {
7308         switch (stringset) {
7309         case ETH_SS_STATS:
7310                 memcpy(buf, bnx2_stats_str_arr,
7311                         sizeof(bnx2_stats_str_arr));
7312                 break;
7313         case ETH_SS_TEST:
7314                 memcpy(buf, bnx2_tests_str_arr,
7315                         sizeof(bnx2_tests_str_arr));
7316                 break;
7317         }
7318 }
7319
7320 static void
7321 bnx2_get_ethtool_stats(struct net_device *dev,
7322                 struct ethtool_stats *stats, u64 *buf)
7323 {
7324         struct bnx2 *bp = netdev_priv(dev);
7325         int i;
7326         u32 *hw_stats = (u32 *) bp->stats_blk;
7327         u8 *stats_len_arr = NULL;
7328
7329         if (hw_stats == NULL) {
7330                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7331                 return;
7332         }
7333
7334         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7335             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7336             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7337             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7338                 stats_len_arr = bnx2_5706_stats_len_arr;
7339         else
7340                 stats_len_arr = bnx2_5708_stats_len_arr;
7341
7342         for (i = 0; i < BNX2_NUM_STATS; i++) {
7343                 if (stats_len_arr[i] == 0) {
7344                         /* skip this counter */
7345                         buf[i] = 0;
7346                         continue;
7347                 }
7348                 if (stats_len_arr[i] == 4) {
7349                         /* 4-byte counter */
7350                         buf[i] = (u64)
7351                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7352                         continue;
7353                 }
7354                 /* 8-byte counter */
7355                 buf[i] = (((u64) *(hw_stats +
7356                                         bnx2_stats_offset_arr[i])) << 32) +
7357                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7358         }
7359 }
7360
7361 static int
7362 bnx2_phys_id(struct net_device *dev, u32 data)
7363 {
7364         struct bnx2 *bp = netdev_priv(dev);
7365         int i;
7366         u32 save;
7367
7368         bnx2_set_power_state(bp, PCI_D0);
7369
7370         if (data == 0)
7371                 data = 2;
7372
7373         save = REG_RD(bp, BNX2_MISC_CFG);
7374         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7375
7376         for (i = 0; i < (data * 2); i++) {
7377                 if ((i % 2) == 0) {
7378                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7379                 }
7380                 else {
7381                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7382                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7383                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7384                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7385                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7386                                 BNX2_EMAC_LED_TRAFFIC);
7387                 }
7388                 msleep_interruptible(500);
7389                 if (signal_pending(current))
7390                         break;
7391         }
7392         REG_WR(bp, BNX2_EMAC_LED, 0);
7393         REG_WR(bp, BNX2_MISC_CFG, save);
7394
7395         if (!netif_running(dev))
7396                 bnx2_set_power_state(bp, PCI_D3hot);
7397
7398         return 0;
7399 }
7400
7401 static int
7402 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7403 {
7404         struct bnx2 *bp = netdev_priv(dev);
7405
7406         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7407                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7408         else
7409                 return (ethtool_op_set_tx_csum(dev, data));
7410 }
7411
/* ethtool entry points for this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7442
/* Called with rtnl_lock */
/* ndo_do_ioctl handler: MII register access via SIOCGMIIPHY,
 * SIOCGMIIREG and SIOCSMIIREG.  Not supported when the PHY is managed
 * remotely by firmware.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7496
7497 /* Called with rtnl_lock */
7498 static int
7499 bnx2_change_mac_addr(struct net_device *dev, void *p)
7500 {
7501         struct sockaddr *addr = p;
7502         struct bnx2 *bp = netdev_priv(dev);
7503
7504         if (!is_valid_ether_addr(addr->sa_data))
7505                 return -EINVAL;
7506
7507         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7508         if (netif_running(dev))
7509                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7510
7511         return 0;
7512 }
7513
7514 /* Called with rtnl_lock */
7515 static int
7516 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7517 {
7518         struct bnx2 *bp = netdev_priv(dev);
7519
7520         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7521                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7522                 return -EINVAL;
7523
7524         dev->mtu = new_mtu;
7525         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7526 }
7527
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler for every vector with its
 * IRQ disabled, so netconsole and friends can make progress without
 * normal interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7542
/* Determine the media type (copper vs. SERDES) of a 5709 port from the
 * BNX2_MISC_DUAL_MEDIA_CTRL register and set BNX2_PHY_FLAG_SERDES
 * accordingly.  Falling through without setting the flag leaves the
 * port treated as copper.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Some bond ids identify the media directly: _C presumably means a
	 * copper-only part (no flag set), _S a SERDES-only part. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual-media part: decode the PHY strap value; a software override
	 * in PHY_CTRL (bits 21+) takes precedence over the hardware strap
	 * (bits 8+). */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-to-media mapping differs between PCI function 0 and
	 * the other function(s); values not listed mean copper. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7580
/* Detect the PCI/PCI-X bus mode, width, and clock speed from the
 * PCICFG misc status register, recording the results in bp->flags
 * (BNX2_FLAG_PCIX / BNX2_FLAG_PCI_32BIT) and bp->bus_speed_mhz.
 * Only meaningful for non-PCIE devices.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the detected clock speed is encoded in the
		 * clock control register; map it to a nominal MHz value. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs. 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7632
7633 static int __devinit
7634 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7635 {
7636         struct bnx2 *bp;
7637         unsigned long mem_len;
7638         int rc, i, j;
7639         u32 reg;
7640         u64 dma_mask, persist_dma_mask;
7641
7642         SET_NETDEV_DEV(dev, &pdev->dev);
7643         bp = netdev_priv(dev);
7644
7645         bp->flags = 0;
7646         bp->phy_flags = 0;
7647
7648         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7649         rc = pci_enable_device(pdev);
7650         if (rc) {
7651                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7652                 goto err_out;
7653         }
7654
7655         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7656                 dev_err(&pdev->dev,
7657                         "Cannot find PCI device base address, aborting.\n");
7658                 rc = -ENODEV;
7659                 goto err_out_disable;
7660         }
7661
7662         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7663         if (rc) {
7664                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7665                 goto err_out_disable;
7666         }
7667
7668         pci_set_master(pdev);
7669         pci_save_state(pdev);
7670
7671         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7672         if (bp->pm_cap == 0) {
7673                 dev_err(&pdev->dev,
7674                         "Cannot find power management capability, aborting.\n");
7675                 rc = -EIO;
7676                 goto err_out_release;
7677         }
7678
7679         bp->dev = dev;
7680         bp->pdev = pdev;
7681
7682         spin_lock_init(&bp->phy_lock);
7683         spin_lock_init(&bp->indirect_lock);
7684         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7685
7686         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7687         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7688         dev->mem_end = dev->mem_start + mem_len;
7689         dev->irq = pdev->irq;
7690
7691         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7692
7693         if (!bp->regview) {
7694                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7695                 rc = -ENOMEM;
7696                 goto err_out_release;
7697         }
7698
7699         /* Configure byte swap and enable write to the reg_window registers.
7700          * Rely on CPU to do target byte swapping on big endian systems
7701          * The chip's target access swapping will not swap all accesses
7702          */
7703         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7704                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7705                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7706
7707         bnx2_set_power_state(bp, PCI_D0);
7708
7709         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7710
7711         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7712                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7713                         dev_err(&pdev->dev,
7714                                 "Cannot find PCIE capability, aborting.\n");
7715                         rc = -EIO;
7716                         goto err_out_unmap;
7717                 }
7718                 bp->flags |= BNX2_FLAG_PCIE;
7719                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7720                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7721         } else {
7722                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7723                 if (bp->pcix_cap == 0) {
7724                         dev_err(&pdev->dev,
7725                                 "Cannot find PCIX capability, aborting.\n");
7726                         rc = -EIO;
7727                         goto err_out_unmap;
7728                 }
7729                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7730         }
7731
7732         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7733                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7734                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7735         }
7736
7737         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7738                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7739                         bp->flags |= BNX2_FLAG_MSI_CAP;
7740         }
7741
7742         /* 5708 cannot support DMA addresses > 40-bit.  */
7743         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7744                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7745         else
7746                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7747
7748         /* Configure DMA attributes. */
7749         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7750                 dev->features |= NETIF_F_HIGHDMA;
7751                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7752                 if (rc) {
7753                         dev_err(&pdev->dev,
7754                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7755                         goto err_out_unmap;
7756                 }
7757         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7758                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7759                 goto err_out_unmap;
7760         }
7761
7762         if (!(bp->flags & BNX2_FLAG_PCIE))
7763                 bnx2_get_pci_speed(bp);
7764
7765         /* 5706A0 may falsely detect SERR and PERR. */
7766         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7767                 reg = REG_RD(bp, PCI_COMMAND);
7768                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7769                 REG_WR(bp, PCI_COMMAND, reg);
7770         }
7771         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7772                 !(bp->flags & BNX2_FLAG_PCIX)) {
7773
7774                 dev_err(&pdev->dev,
7775                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7776                 goto err_out_unmap;
7777         }
7778
7779         bnx2_init_nvram(bp);
7780
7781         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7782
7783         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7784             BNX2_SHM_HDR_SIGNATURE_SIG) {
7785                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7786
7787                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7788         } else
7789                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7790
7791         /* Get the permanent MAC address.  First we need to make sure the
7792          * firmware is actually running.
7793          */
7794         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7795
7796         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7797             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7798                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7799                 rc = -ENODEV;
7800                 goto err_out_unmap;
7801         }
7802
7803         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7804         for (i = 0, j = 0; i < 3; i++) {
7805                 u8 num, k, skip0;
7806
7807                 num = (u8) (reg >> (24 - (i * 8)));
7808                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7809                         if (num >= k || !skip0 || k == 1) {
7810                                 bp->fw_version[j++] = (num / k) + '0';
7811                                 skip0 = 0;
7812                         }
7813                 }
7814                 if (i != 2)
7815                         bp->fw_version[j++] = '.';
7816         }
7817         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7818         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7819                 bp->wol = 1;
7820
7821         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7822                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7823
7824                 for (i = 0; i < 30; i++) {
7825                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7826                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7827                                 break;
7828                         msleep(10);
7829                 }
7830         }
7831         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7832         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7833         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7834             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7835                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7836
7837                 bp->fw_version[j++] = ' ';
7838                 for (i = 0; i < 3; i++) {
7839                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7840                         reg = swab32(reg);
7841                         memcpy(&bp->fw_version[j], &reg, 4);
7842                         j += 4;
7843                 }
7844         }
7845
7846         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7847         bp->mac_addr[0] = (u8) (reg >> 8);
7848         bp->mac_addr[1] = (u8) reg;
7849
7850         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7851         bp->mac_addr[2] = (u8) (reg >> 24);
7852         bp->mac_addr[3] = (u8) (reg >> 16);
7853         bp->mac_addr[4] = (u8) (reg >> 8);
7854         bp->mac_addr[5] = (u8) reg;
7855
7856         bp->tx_ring_size = MAX_TX_DESC_CNT;
7857         bnx2_set_rx_ring_size(bp, 255);
7858
7859         bp->rx_csum = 1;
7860
7861         bp->tx_quick_cons_trip_int = 2;
7862         bp->tx_quick_cons_trip = 20;
7863         bp->tx_ticks_int = 18;
7864         bp->tx_ticks = 80;
7865
7866         bp->rx_quick_cons_trip_int = 2;
7867         bp->rx_quick_cons_trip = 12;
7868         bp->rx_ticks_int = 18;
7869         bp->rx_ticks = 18;
7870
7871         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7872
7873         bp->current_interval = BNX2_TIMER_INTERVAL;
7874
7875         bp->phy_addr = 1;
7876
7877         /* Disable WOL support if we are running on a SERDES chip. */
7878         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7879                 bnx2_get_5709_media(bp);
7880         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7881                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7882
7883         bp->phy_port = PORT_TP;
7884         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7885                 bp->phy_port = PORT_FIBRE;
7886                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7887                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7888                         bp->flags |= BNX2_FLAG_NO_WOL;
7889                         bp->wol = 0;
7890                 }
7891                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7892                         /* Don't do parallel detect on this board because of
7893                          * some board problems.  The link will not go down
7894                          * if we do parallel detect.
7895                          */
7896                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7897                             pdev->subsystem_device == 0x310c)
7898                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7899                 } else {
7900                         bp->phy_addr = 2;
7901                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7902                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7903                 }
7904         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7905                    CHIP_NUM(bp) == CHIP_NUM_5708)
7906                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7907         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7908                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7909                   CHIP_REV(bp) == CHIP_REV_Bx))
7910                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7911
7912         bnx2_init_fw_cap(bp);
7913
7914         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7915             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7916             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7917             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7918                 bp->flags |= BNX2_FLAG_NO_WOL;
7919                 bp->wol = 0;
7920         }
7921
7922         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7923                 bp->tx_quick_cons_trip_int =
7924                         bp->tx_quick_cons_trip;
7925                 bp->tx_ticks_int = bp->tx_ticks;
7926                 bp->rx_quick_cons_trip_int =
7927                         bp->rx_quick_cons_trip;
7928                 bp->rx_ticks_int = bp->rx_ticks;
7929                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7930                 bp->com_ticks_int = bp->com_ticks;
7931                 bp->cmd_ticks_int = bp->cmd_ticks;
7932         }
7933
7934         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7935          *
7936          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7937          * with byte enables disabled on the unused 32-bit word.  This is legal
7938          * but causes problems on the AMD 8132 which will eventually stop
7939          * responding after a while.
7940          *
7941          * AMD believes this incompatibility is unique to the 5706, and
7942          * prefers to locally disable MSI rather than globally disabling it.
7943          */
7944         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7945                 struct pci_dev *amd_8132 = NULL;
7946
7947                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7948                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7949                                                   amd_8132))) {
7950
7951                         if (amd_8132->revision >= 0x10 &&
7952                             amd_8132->revision <= 0x13) {
7953                                 disable_msi = 1;
7954                                 pci_dev_put(amd_8132);
7955                                 break;
7956                         }
7957                 }
7958         }
7959
7960         bnx2_set_default_link(bp);
7961         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7962
7963         init_timer(&bp->timer);
7964         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7965         bp->timer.data = (unsigned long) bp;
7966         bp->timer.function = bnx2_timer;
7967
7968         return 0;
7969
7970 err_out_unmap:
7971         if (bp->regview) {
7972                 iounmap(bp->regview);
7973                 bp->regview = NULL;
7974         }
7975
7976 err_out_release:
7977         pci_release_regions(pdev);
7978
7979 err_out_disable:
7980         pci_disable_device(pdev);
7981         pci_set_drvdata(pdev, NULL);
7982
7983 err_out:
7984         return rc;
7985 }
7986
7987 static char * __devinit
7988 bnx2_bus_string(struct bnx2 *bp, char *str)
7989 {
7990         char *s = str;
7991
7992         if (bp->flags & BNX2_FLAG_PCIE) {
7993                 s += sprintf(s, "PCI Express");
7994         } else {
7995                 s += sprintf(s, "PCI");
7996                 if (bp->flags & BNX2_FLAG_PCIX)
7997                         s += sprintf(s, "-X");
7998                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7999                         s += sprintf(s, " 32-bit");
8000                 else
8001                         s += sprintf(s, " 64-bit");
8002                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8003         }
8004         return str;
8005 }
8006
8007 static void __devinit
8008 bnx2_init_napi(struct bnx2 *bp)
8009 {
8010         int i;
8011
8012         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8013                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8014                 int (*poll)(struct napi_struct *, int);
8015
8016                 if (i == 0)
8017                         poll = bnx2_poll;
8018                 else
8019                         poll = bnx2_poll_msix;
8020
8021                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8022                 bnapi->bp = bp;
8023         }
8024 }
8025
/* net_device_ops shared by all bnx2 devices.  VLAN registration and
 * the netpoll controller entry are compiled in only when the
 * corresponding kernel support is configured.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8044
/* Mirror feature flags into dev->vlan_features when VLAN support is
 * built in; a no-op otherwise.  Note: specifier order fixed from
 * "static void inline" to the conventional "static inline void".
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8051
/* PCI probe callback: allocate the multiqueue net_device, run board
 * bring-up, load firmware blobs, configure offload features, and
 * register the netdev.  Returns 0 or a negative errno; the error path
 * unwinds everything bnx2_init_board() and the firmware request did.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;	/* print driver banner only once */
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];	/* scratch buffer for bnx2_bus_string() */

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* Board init released its own resources; just free dev. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* bp->mac_addr was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	/* IPv6 checksum offload and TSO6 are enabled on the 5709 only. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8138
/* PCI remove callback: unwind everything set up in bnx2_init_one().
 * Pending work (e.g. the reset_task) is flushed first so it cannot
 * run against a half-torn-down device.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	/* Firmware blobs obtained by bnx2_request_firmware(). */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8162
/* PM suspend callback: quiesce the NIC and drop it into the PCI power
 * state chosen for @state.  If the interface is down, only the config
 * space needs saving.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before shutting down the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8186
/* PM resume callback: restore config space and, if the interface was
 * up, re-power and fully re-initialize the NIC before restarting
 * traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): bnx2_init_nic() return value is ignored here —
	 * a failed re-init leaves the device attached but dead. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
8203
8204 /**
8205  * bnx2_io_error_detected - called when PCI error is detected
8206  * @pdev: Pointer to PCI device
8207  * @state: The current pci connection state
8208  *
8209  * This function is called after a PCI bus error affecting
8210  * this device has been detected.
8211  */
8212 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8213                                                pci_channel_state_t state)
8214 {
8215         struct net_device *dev = pci_get_drvdata(pdev);
8216         struct bnx2 *bp = netdev_priv(dev);
8217
8218         rtnl_lock();
8219         netif_device_detach(dev);
8220
8221         if (state == pci_channel_io_perm_failure) {
8222                 rtnl_unlock();
8223                 return PCI_ERS_RESULT_DISCONNECT;
8224         }
8225
8226         if (netif_running(dev)) {
8227                 bnx2_netif_stop(bp);
8228                 del_timer_sync(&bp->timer);
8229                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8230         }
8231
8232         pci_disable_device(pdev);
8233         rtnl_unlock();
8234
8235         /* Request a slot slot reset. */
8236         return PCI_ERS_RESULT_NEED_RESET;
8237 }
8238
8239 /**
8240  * bnx2_io_slot_reset - called after the pci bus has been reset.
8241  * @pdev: Pointer to PCI device
8242  *
8243  * Restart the card from scratch, as if from a cold-boot.
8244  */
8245 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8246 {
8247         struct net_device *dev = pci_get_drvdata(pdev);
8248         struct bnx2 *bp = netdev_priv(dev);
8249
8250         rtnl_lock();
8251         if (pci_enable_device(pdev)) {
8252                 dev_err(&pdev->dev,
8253                         "Cannot re-enable PCI device after reset.\n");
8254                 rtnl_unlock();
8255                 return PCI_ERS_RESULT_DISCONNECT;
8256         }
8257         pci_set_master(pdev);
8258         pci_restore_state(pdev);
8259
8260         if (netif_running(dev)) {
8261                 bnx2_set_power_state(bp, PCI_D0);
8262                 bnx2_init_nic(bp, 1);
8263         }
8264
8265         rtnl_unlock();
8266         return PCI_ERS_RESULT_RECOVERED;
8267 }
8268
8269 /**
8270  * bnx2_io_resume - called when traffic can start flowing again.
8271  * @pdev: Pointer to PCI device
8272  *
8273  * This callback is called when the error recovery driver tells us that
8274  * its OK to resume normal operation.
8275  */
8276 static void bnx2_io_resume(struct pci_dev *pdev)
8277 {
8278         struct net_device *dev = pci_get_drvdata(pdev);
8279         struct bnx2 *bp = netdev_priv(dev);
8280
8281         rtnl_lock();
8282         if (netif_running(dev))
8283                 bnx2_netif_start(bp);
8284
8285         netif_device_attach(dev);
8286         rtnl_unlock();
8287 }
8288
/* PCI error recovery callbacks (AER): detect -> slot reset -> resume. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8294
/* PCI driver descriptor tying the probe/remove/PM/error callbacks
 * above to the device IDs in bnx2_pci_tbl. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8304
/* Module entry point: register the PCI driver with the core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8309
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
8317
8318
8319