bnx2: Protect tx timeout reset with rtnl_lock().
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define PFX DRV_MODULE_NAME     ": "
62 #define DRV_MODULE_VERSION      "2.0.2"
63 #define DRV_MODULE_RELDATE      "Aug 21, 2009"
64 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
65 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
67 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
68 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87
88 static int disable_msi = 0;
89
90 module_param(disable_msi, int, 0);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
/* Supported board variants; values index into board_info[] below. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
106
/* indexed by board_t, above */
/* Human-readable board names used at probe time. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
123
/* PCI ID table.  HP OEM boards (matched on subsystem vendor/device)
 * must be listed before the generic PCI_ANY_ID entries for the same
 * device so they are matched first.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* constant; raw IDs used */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
149
/* Known NVRAM/flash parts.  Field layout follows struct flash_spec
 * (the raw hex words are device-specific configuration values; the
 * last fields are page geometry, address mask, total size and name).
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
238
/* 5709 chips have a single fixed buffered-flash configuration instead
 * of an entry in flash_table[].
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
247
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
/* Return the number of free tx descriptors in @txr.
 *
 * Runs locklessly; the smp_mb() orders this read of tx_prod/tx_cons
 * against the caller's preceding updates (producer and consumer sides
 * issue matching barriers).
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		/* Indices are 16-bit based; mask the wrap-around, then
		 * clamp the full-ring case to the usable maximum.
		 */
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
267
268 static u32
269 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
270 {
271         u32 val;
272
273         spin_lock_bh(&bp->indirect_lock);
274         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
275         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
276         spin_unlock_bh(&bp->indirect_lock);
277         return val;
278 }
279
/* Write @val to device register @offset indirectly through the PCI
 * config register window, under indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
288
/* Write @val at @offset within the firmware shared-memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
299 }
300
/* Write @val to context memory at @cid_addr + @offset.
 *
 * 5709 uses the CTX_CTX_DATA/CTX_CTX_CTRL request interface and polls
 * for completion (up to 5 polls, 5us apart; a timeout is not reported).
 * Older chips use the simple CTX_DATA_ADR/CTX_DATA window.  Both paths
 * are serialized by indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* wait for the chip to clear WRITE_REQ */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
324
325 #ifdef BCM_CNIC
326 static int
327 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328 {
329         struct bnx2 *bp = netdev_priv(dev);
330         struct drv_ctl_io *io = &info->data.io;
331
332         switch (info->cmd) {
333         case DRV_CTL_IO_WR_CMD:
334                 bnx2_reg_wr_ind(bp, io->offset, io->data);
335                 break;
336         case DRV_CTL_IO_RD_CMD:
337                 io->data = bnx2_reg_rd_ind(bp, io->offset);
338                 break;
339         case DRV_CTL_CTX_WR_CMD:
340                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341                 break;
342         default:
343                 return -EINVAL;
344         }
345         return 0;
346 }
347
/* Fill in the cnic_eth_dev IRQ descriptor for the CNIC driver.
 *
 * With MSI-X, CNIC gets its own vector (index bp->irq_nvecs) and its
 * own status block; otherwise it shares vector 0 and piggybacks on the
 * default status block via cnic_present/cnic_tag.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* status blocks are laid out contiguously at MSIX alignment */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
374
/* Attach the CNIC driver's callbacks to this device.
 *
 * Returns -EINVAL if @ops is NULL, -EBUSY if a CNIC driver is already
 * registered, 0 on success.
 *
 * NOTE(review): unlike bnx2_unregister_cnic(), cnic_ops is published
 * here without holding bp->cnic_lock -- presumably register/unregister
 * are serialized by the caller; confirm against the cnic driver.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
397
/* Detach the CNIC driver.  State is cleared under cnic_lock and then
 * synchronize_rcu() waits out any reader still using the old cnic_ops
 * pointer before returning.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
412
/* Export this device's cnic_eth_dev so the CNIC driver can bind to it.
 * Fills in chip/PCI/register-window info plus the three driver
 * callbacks (ctl, register, unregister) and returns the structure.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
429
/* Tell a registered CNIC driver (if any) to stop.  cnic_lock keeps
 * cnic_ops stable for the duration of the callback.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
444
/* Tell a registered CNIC driver (if any) to start.  In non-MSI-X mode
 * the shared status-block tag is resynced first, since CNIC piggybacks
 * on the default status block there.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
464
465 #else
466
/* CNIC support not built in: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
471
/* CNIC support not built in: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
476
477 #endif
478
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If hardware auto-polling is enabled, it is switched off around the
 * transaction and restored afterwards.  Returns 0 on success or
 * -EBUSY if the transaction never completed (in which case *@val is
 * set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* read back, then wait for the mode change to settle */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* issue the MDIO read request */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* poll up to 50 x 10us for START_BUSY to clear */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* restore auto-polling */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
535
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * transaction if active.  Returns 0 on success, -EBUSY if the
 * transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* issue the MDIO write request */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* poll up to 50 x 10us for START_BUSY to clear */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* restore auto-polling */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
584
/* Mask interrupts on every vector; the trailing read flushes the
 * posted writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
598
/* Re-enable interrupts on all vectors.
 *
 * Each vector gets two writes: first with MASK_INT still set plus
 * INDEX_VALID at the last seen status index, then a second without
 * MASK_INT to actually unmask.  The final COAL_NOW command kicks the
 * host coalescing block so any pending event is delivered promptly.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
619
/* Disable interrupts and wait for any in-flight handlers to finish.
 *
 * intr_sem is raised first and is paired with the atomic_dec_and_test
 * in bnx2_netif_start(), so stop/start calls balance.  If the device
 * is down there is nothing further to quiesce.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
633
634 static void
635 bnx2_napi_disable(struct bnx2 *bp)
636 {
637         int i;
638
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 napi_disable(&bp->bnx2_napi[i].napi);
641 }
642
643 static void
644 bnx2_napi_enable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_enable(&bp->bnx2_napi[i].napi);
650 }
651
/* Quiesce the interface: stop CNIC, disable and sync interrupts, then
 * stop NAPI and the tx queues.  trans_start is refreshed so the netdev
 * watchdog does not see a stale timestamp and fire a tx timeout while
 * the queues are deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
663
/* Undo bnx2_netif_stop().  Stop/start calls may nest via intr_sem;
 * only the call that brings intr_sem back to zero actually restarts
 * tx queues, NAPI, interrupts and CNIC.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}
676
677 static void
678 bnx2_free_tx_mem(struct bnx2 *bp)
679 {
680         int i;
681
682         for (i = 0; i < bp->num_tx_rings; i++) {
683                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
684                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
685
686                 if (txr->tx_desc_ring) {
687                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
688                                             txr->tx_desc_ring,
689                                             txr->tx_desc_mapping);
690                         txr->tx_desc_ring = NULL;
691                 }
692                 kfree(txr->tx_buf_ring);
693                 txr->tx_buf_ring = NULL;
694         }
695 }
696
/* Free the rx descriptor pages, rx page-descriptor pages and the
 * vmalloc'ed software shadow rings of every rx ring.  All pointers are
 * NULLed so the function tolerates partial allocations and re-entry.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* DMA descriptor pages */
		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		/* page-ring descriptor pages (allocated only when
		 * rx_pg_ring_size is non-zero)
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
728
729 static int
730 bnx2_alloc_tx_mem(struct bnx2 *bp)
731 {
732         int i;
733
734         for (i = 0; i < bp->num_tx_rings; i++) {
735                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
736                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
737
738                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
739                 if (txr->tx_buf_ring == NULL)
740                         return -ENOMEM;
741
742                 txr->tx_desc_ring =
743                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
744                                              &txr->tx_desc_mapping);
745                 if (txr->tx_desc_ring == NULL)
746                         return -ENOMEM;
747         }
748         return 0;
749 }
750
/* Allocate rx shadow rings and DMA descriptor pages for every rx ring.
 *
 * Returns 0 or -ENOMEM.  On failure, partially-allocated state is left
 * in place for the caller to clean up (bnx2_alloc_mem calls
 * bnx2_free_mem on error).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* software shadow ring; vmalloc because it can be large */
		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* page ring is only needed when paged rx is configured */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
799
/* Free everything bnx2_alloc_mem() allocated: tx/rx ring memory, the
 * 5709 context pages and the combined status + statistics block.
 * Safe on a partially-allocated device -- every free is NULL-checked
 * or NULL-tolerant.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		/* status and stats blocks share one allocation */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
825
/* Allocate all device memory: the combined status + statistics DMA
 * block, the 5709 context pages, and the rx/tx ring memory.
 *
 * Returns 0 on success or -ENOMEM; on any failure bnx2_free_mem()
 * releases whatever was allocated so far.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* room for one status block per MSI-X hardware vector */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* vector 0 uses the base (MSI-style) status block */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* remaining vectors each get an aligned MSI-X block */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* stats block lives right after the status block(s) */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host context memory, page by page */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
902
/* Report the current link state (speed/duplex/autoneg result) to the
 * bootcode through the BNX2_LINK_STATUS shared-memory word.  Skipped
 * entirely when the firmware manages the PHY remotely.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed + duplex into the firmware's status format. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR status bits are latched; read twice so the
			 * second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			/* Autoneg not complete (or link came up via
			 * parallel detection) vs. a clean AN completion.
			 */
			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
961
962 static char *
963 bnx2_xceiver_str(struct bnx2 *bp)
964 {
965         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
966                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
967                  "Copper"));
968 }
969
970 static void
971 bnx2_report_link(struct bnx2 *bp)
972 {
973         if (bp->link_up) {
974                 netif_carrier_on(bp->dev);
975                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
976                        bnx2_xceiver_str(bp));
977
978                 printk("%d Mbps ", bp->line_speed);
979
980                 if (bp->duplex == DUPLEX_FULL)
981                         printk("full duplex");
982                 else
983                         printk("half duplex");
984
985                 if (bp->flow_ctrl) {
986                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
987                                 printk(", receive ");
988                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
989                                         printk("& transmit ");
990                         }
991                         else {
992                                 printk(", transmit ");
993                         }
994                         printk("flow control ON");
995                 }
996                 printk("\n");
997         }
998         else {
999                 netif_carrier_off(bp->dev);
1000                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
1001                        bnx2_xceiver_str(bp));
1002         }
1003
1004         bnx2_report_fw_link(bp);
1005 }
1006
/* Resolve the effective flow control setting (bp->flow_ctrl) after a
 * link change.  If flow control was not autonegotiated, the requested
 * setting is applied directly (full duplex only).  Otherwise the local
 * and partner pause advertisements are compared per the 802.3
 * resolution table.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Not autonegotiating both speed and flow control: use the
	 * administratively requested mode (full duplex links only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only meaningful on full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the resolved pause result directly in a
	 * status register; no need to compare advertisements.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000BASE-X pause bits onto the standard copper pause
	 * bit positions so one resolution table below handles both.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1082
1083 static int
1084 bnx2_5709s_linkup(struct bnx2 *bp)
1085 {
1086         u32 val, speed;
1087
1088         bp->link_up = 1;
1089
1090         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1091         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1092         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1095                 bp->line_speed = bp->req_line_speed;
1096                 bp->duplex = bp->req_duplex;
1097                 return 0;
1098         }
1099         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1100         switch (speed) {
1101                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1102                         bp->line_speed = SPEED_10;
1103                         break;
1104                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1105                         bp->line_speed = SPEED_100;
1106                         break;
1107                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1108                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1109                         bp->line_speed = SPEED_1000;
1110                         break;
1111                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1112                         bp->line_speed = SPEED_2500;
1113                         break;
1114         }
1115         if (val & MII_BNX2_GP_TOP_AN_FD)
1116                 bp->duplex = DUPLEX_FULL;
1117         else
1118                 bp->duplex = DUPLEX_HALF;
1119         return 0;
1120 }
1121
1122 static int
1123 bnx2_5708s_linkup(struct bnx2 *bp)
1124 {
1125         u32 val;
1126
1127         bp->link_up = 1;
1128         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1129         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1130                 case BCM5708S_1000X_STAT1_SPEED_10:
1131                         bp->line_speed = SPEED_10;
1132                         break;
1133                 case BCM5708S_1000X_STAT1_SPEED_100:
1134                         bp->line_speed = SPEED_100;
1135                         break;
1136                 case BCM5708S_1000X_STAT1_SPEED_1G:
1137                         bp->line_speed = SPEED_1000;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1140                         bp->line_speed = SPEED_2500;
1141                         break;
1142         }
1143         if (val & BCM5708S_1000X_STAT1_FD)
1144                 bp->duplex = DUPLEX_FULL;
1145         else
1146                 bp->duplex = DUPLEX_HALF;
1147
1148         return 0;
1149 }
1150
1151 static int
1152 bnx2_5706s_linkup(struct bnx2 *bp)
1153 {
1154         u32 bmcr, local_adv, remote_adv, common;
1155
1156         bp->link_up = 1;
1157         bp->line_speed = SPEED_1000;
1158
1159         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1160         if (bmcr & BMCR_FULLDPLX) {
1161                 bp->duplex = DUPLEX_FULL;
1162         }
1163         else {
1164                 bp->duplex = DUPLEX_HALF;
1165         }
1166
1167         if (!(bmcr & BMCR_ANENABLE)) {
1168                 return 0;
1169         }
1170
1171         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1172         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1173
1174         common = local_adv & remote_adv;
1175         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1176
1177                 if (common & ADVERTISE_1000XFULL) {
1178                         bp->duplex = DUPLEX_FULL;
1179                 }
1180                 else {
1181                         bp->duplex = DUPLEX_HALF;
1182                 }
1183         }
1184
1185         return 0;
1186 }
1187
/* Record link-up speed/duplex for a copper PHY.  With autoneg enabled,
 * the result is resolved from the 1000BASE-T registers first, then the
 * 10/100 advertisement registers in descending priority; with autoneg
 * disabled, BMCR's forced settings are used.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two positions above the local advertisement bits
		 * in MII_CTRL1000, hence the >> 2 before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the standard
			 * 10/100 advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all: treat the
				 * link as down.
				 */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: speed/duplex are forced via BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1253
/* Program the L2 rx context type word for the given context id.  On
 * 5709, this word also carries the rx buffer watermarks that trigger
 * pause frame generation when tx flow control is enabled.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): the 0x02 << 8 field is undocumented here —
	 * presumably a context sub-type/size code; confirm against the
	 * chip documentation before changing.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark only matters when we may send pause
		 * frames; otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Watermarks must stay ordered (hi above lo). */
		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* The hardware field holds at most 0xf; a zero high
		 * watermark effectively disables the low one too.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1289
1290 static void
1291 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1292 {
1293         int i;
1294         u32 cid;
1295
1296         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1297                 if (i == 1)
1298                         cid = RX_RSS_CID;
1299                 bnx2_init_rx_context(bp, cid);
1300         }
1301 }
1302
/* Program the EMAC to match the resolved link state: inter-frame gap,
 * port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Also
 * acknowledges the EMAC link-change interrupt and, on 5709, reprograms
 * the rx contexts so the pause watermarks track the new flow control
 * setting.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff are EMAC_TX_LENGTHS (IFG/slot
	 * time) encodings; the larger value is applied only for 1G half
	 * duplex.  Confirm the exact field meanings against chip docs.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it
				 * falls through to plain MII below.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx watermarks depend on flow_ctrl; reprogram contexts. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1370
1371 static void
1372 bnx2_enable_bmsr1(struct bnx2 *bp)
1373 {
1374         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1375             (CHIP_NUM(bp) == CHIP_NUM_5709))
1376                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1377                                MII_BNX2_BLK_ADDR_GP_STATUS);
1378 }
1379
1380 static void
1381 bnx2_disable_bmsr1(struct bnx2 *bp)
1382 {
1383         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384             (CHIP_NUM(bp) == CHIP_NUM_5709))
1385                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1387 }
1388
1389 static int
1390 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1391 {
1392         u32 up1;
1393         int ret = 1;
1394
1395         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1396                 return 0;
1397
1398         if (bp->autoneg & AUTONEG_SPEED)
1399                 bp->advertising |= ADVERTISED_2500baseX_Full;
1400
1401         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1402                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1403
1404         bnx2_read_phy(bp, bp->mii_up1, &up1);
1405         if (!(up1 & BCM5708S_UP1_2G5)) {
1406                 up1 |= BCM5708S_UP1_2G5;
1407                 bnx2_write_phy(bp, bp->mii_up1, up1);
1408                 ret = 0;
1409         }
1410
1411         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1412                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1413                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1414
1415         return ret;
1416 }
1417
1418 static int
1419 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1420 {
1421         u32 up1;
1422         int ret = 0;
1423
1424         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1425                 return 0;
1426
1427         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1428                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1429
1430         bnx2_read_phy(bp, bp->mii_up1, &up1);
1431         if (up1 & BCM5708S_UP1_2G5) {
1432                 up1 &= ~BCM5708S_UP1_2G5;
1433                 bnx2_write_phy(bp, bp->mii_up1, up1);
1434                 ret = 1;
1435         }
1436
1437         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1438                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1439                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1440
1441         return ret;
1442 }
1443
/* Force the PHY to 2.5G operation.  The mechanism is chip-specific:
 * 5709 uses the SERDES_DIG MISC1 force field, 5708 uses a BMCR bit.
 * When speed autoneg was requested, autoneg is turned off here since a
 * forced speed and autoneg are mutually exclusive.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Select the SERDES_DIG block, set the force-2.5G field
		 * in MISC1, then restore the default block address.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no force-2.5G mechanism. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1480
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force-2.5G
 * setting and, when speed autoneg was requested, re-enable and restart
 * autonegotiation.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG MISC1 register,
		 * then restore the default block address.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no force-2.5G mechanism. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1513
/* Toggle the 5706S forced-link-down state via the expansion SERDES_CTL
 * register: start != 0 clears bits (val & 0xff0f), start == 0 sets
 * bits 0xc0.
 * NOTE(review): the 0xff0f / 0xc0 masks are undocumented here —
 * presumably transmitter/serdes disable bits; confirm against the
 * Broadcom register documentation before modifying.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	/* Expansion registers are accessed indirectly: write the
	 * address, then read/write through the DSP data port.
	 */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1526
/* Re-evaluate the link state: read link status from the PHY, update
 * bp->link_up / speed / duplex / flow control, report any change, and
 * reprogram the MAC to match.  Always returns 0.  Loopback modes and
 * remote-PHY configurations short-circuit early.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback, pretend the link is always up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed PHY: link events arrive via firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the old state so we only report real transitions. */
	link_up = bp->link_up;

	/* BMSR1 may live in an alternate register block on 5709 SerDes;
	 * it is also latched, so read twice for the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link from the EMAC status and
	 * the AN debug shadow register rather than trusting BMSR alone.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is a shadow register; read twice (latched). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg
		 * can run, and leave parallel-detect mode if active.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Always resync the MAC with the (possibly unchanged) state. */
	bnx2_set_mac_link(bp);

	return 0;
}
1610
1611 static int
1612 bnx2_reset_phy(struct bnx2 *bp)
1613 {
1614         int i;
1615         u32 reg;
1616
1617         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1618
1619 #define PHY_RESET_MAX_WAIT 100
1620         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1621                 udelay(10);
1622
1623                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624                 if (!(reg & BMCR_RESET)) {
1625                         udelay(20);
1626                         break;
1627                 }
1628         }
1629         if (i == PHY_RESET_MAX_WAIT) {
1630                 return -EBUSY;
1631         }
1632         return 0;
1633 }
1634
1635 static u32
1636 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637 {
1638         u32 adv = 0;
1639
1640         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPAUSE;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_CAP;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1659                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661                 }
1662                 else {
1663                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664                 }
1665         }
1666         return adv;
1667 }
1668
1669 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1670
/* Configure a firmware-managed (remote) PHY: encode the requested
 * speed, duplex, and pause settings into the netlink-style argument
 * word and hand it to the bootcode via a SET_LINK mailbox command.
 * The phy_lock is dropped around the firmware handshake (see the
 * sparse annotations).  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex combo. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested setting. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep waiting for the bootcode ack, so the
	 * phy_lock must be dropped across the call.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1729
/* Program the SerDes PHY for the requested speed/duplex settings.
 * Called with bp->phy_lock held; may drop and re-acquire it around
 * the msleep() used to force a visible link-down event.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        /* Firmware-managed (remote) PHYs are configured through the
         * firmware mailbox rather than by direct MII access.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced speed/duplex: build a BMCR value with autoneg
                 * disabled and program it directly.
                 */
                u32 new_bmcr;
                int force_link_down = 0;

                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                /* Forcing 2.5G is chip-specific. */
                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Autoneg path: advertise 1G (plus 2.5G where capable) and the
         * requested pause bits, then restart autoneg only if the
         * advertisement changed or autoneg was disabled.
         */
        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1846
/* Full fibre advertisement mask; NOTE: expands to an expression that
 * reads a local `bp` (struct bnx2 *) in the caller's scope to decide
 * whether 2.5G may be advertised.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Full copper advertisement mask (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register bit masks (ADVERTISE_* from mii.h). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
1862 static void
1863 bnx2_set_default_remote_link(struct bnx2 *bp)
1864 {
1865         u32 link;
1866
1867         if (bp->phy_port == PORT_TP)
1868                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1869         else
1870                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1871
1872         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1873                 bp->req_line_speed = 0;
1874                 bp->autoneg |= AUTONEG_SPEED;
1875                 bp->advertising = ADVERTISED_Autoneg;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877                         bp->advertising |= ADVERTISED_10baseT_Half;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879                         bp->advertising |= ADVERTISED_10baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881                         bp->advertising |= ADVERTISED_100baseT_Half;
1882                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883                         bp->advertising |= ADVERTISED_100baseT_Full;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885                         bp->advertising |= ADVERTISED_1000baseT_Full;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887                         bp->advertising |= ADVERTISED_2500baseX_Full;
1888         } else {
1889                 bp->autoneg = 0;
1890                 bp->advertising = 0;
1891                 bp->req_duplex = DUPLEX_FULL;
1892                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893                         bp->req_line_speed = SPEED_10;
1894                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895                                 bp->req_duplex = DUPLEX_HALF;
1896                 }
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898                         bp->req_line_speed = SPEED_100;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903                         bp->req_line_speed = SPEED_1000;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905                         bp->req_line_speed = SPEED_2500;
1906         }
1907 }
1908
1909 static void
1910 bnx2_set_default_link(struct bnx2 *bp)
1911 {
1912         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913                 bnx2_set_default_remote_link(bp);
1914                 return;
1915         }
1916
1917         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918         bp->req_line_speed = 0;
1919         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1920                 u32 reg;
1921
1922                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1923
1924                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1927                         bp->autoneg = 0;
1928                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1929                         bp->req_duplex = DUPLEX_FULL;
1930                 }
1931         } else
1932                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1933 }
1934
1935 static void
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1937 {
1938         u32 msg;
1939         u32 addr;
1940
1941         spin_lock(&bp->indirect_lock);
1942         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946         spin_unlock(&bp->indirect_lock);
1947 }
1948
/* Decode the link status word published by the firmware-managed
 * (remote) PHY and update the driver's link state to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* remember to detect a change */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxHALF case sets half duplex and then deliberately
                 * falls through to the matching xxFULL case to pick up
                 * the line speed.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control: if speed and flow ctrl are not both
                 * autonegotiated, use the forced setting; otherwise take
                 * the negotiated result reported by the firmware.
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A media change (fiber <-> copper) resets the defaults. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
2025
2026 static int
2027 bnx2_set_remote_link(struct bnx2 *bp)
2028 {
2029         u32 evt_code;
2030
2031         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2032         switch (evt_code) {
2033                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034                         bnx2_remote_phy_event(bp);
2035                         break;
2036                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037                 default:
2038                         bnx2_send_heart_beat(bp);
2039                         break;
2040         }
2041         return 0;
2042 }
2043
/* Program the copper PHY for the requested speed/duplex settings.
 * Called with bp->phy_lock held; may drop and re-acquire it around
 * the msleep() used to force a visible link-down event.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: rebuild the 10/100 and 1000BASE-T advertisement
                 * registers from bp->advertising and restart autoneg only
                 * if something actually changed.
                 */
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR is read twice - presumably because the link bit is
                 * latched and the second read returns current status.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
2142
2143 static int
2144 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2145 __releases(&bp->phy_lock)
2146 __acquires(&bp->phy_lock)
2147 {
2148         if (bp->loopback == MAC_LOOPBACK)
2149                 return 0;
2150
2151         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2152                 return (bnx2_setup_serdes_phy(bp, port));
2153         }
2154         else {
2155                 return (bnx2_setup_copper_phy(bp));
2156         }
2157 }
2158
/* One-time init of the 5709 SerDes PHY.  The 5709S exposes the IEEE
 * MII registers at an offset of 0x10; record those offsets and then
 * program the PHY blocks selected via MII_BNX2_BLK_ADDR.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* IEEE register set lives at a 0x10 offset on this PHY. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the autoneg MMD through the AER block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fiber mode, disable auto media detection. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Enable or disable 2.5G advertisement per capability flag. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the combo IEEE block selected for subsequent MII access. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2208
/* One-time init of the 5708 SerDes PHY: fiber mode, PLL detect,
 * optional 2.5G advertisement, plus chip-rev and backplane-specific
 * TX amplitude tuning from NVRAM config.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        /* Early 5708 revisions need a stronger TX signal. */
        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply the NVRAM-configured TX control value on backplane boards. */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2266
2267 static int
2268 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2269 {
2270         if (reset_phy)
2271                 bnx2_reset_phy(bp);
2272
2273         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2274
2275         if (CHIP_NUM(bp) == CHIP_NUM_5706)
2276                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2277
2278         if (bp->dev->mtu > 1500) {
2279                 u32 val;
2280
2281                 /* Set extended packet length bit */
2282                 bnx2_write_phy(bp, 0x18, 0x7);
2283                 bnx2_read_phy(bp, 0x18, &val);
2284                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2285
2286                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2287                 bnx2_read_phy(bp, 0x1c, &val);
2288                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2289         }
2290         else {
2291                 u32 val;
2292
2293                 bnx2_write_phy(bp, 0x18, 0x7);
2294                 bnx2_read_phy(bp, 0x18, &val);
2295                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2296
2297                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2298                 bnx2_read_phy(bp, 0x1c, &val);
2299                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2300         }
2301
2302         return 0;
2303 }
2304
/* One-time init of the copper PHY: optional CRC-fix and early-DAC
 * workarounds, jumbo-frame extended packet length, and
 * ethernet@wirespeed enable.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Apply the CRC workaround register sequence when flagged. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear the extended packet length bits for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2356
2357
2358 static int
2359 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2360 __releases(&bp->phy_lock)
2361 __acquires(&bp->phy_lock)
2362 {
2363         u32 val;
2364         int rc = 0;
2365
2366         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2368
2369         bp->mii_bmcr = MII_BMCR;
2370         bp->mii_bmsr = MII_BMSR;
2371         bp->mii_bmsr1 = MII_BMSR;
2372         bp->mii_adv = MII_ADVERTISE;
2373         bp->mii_lpa = MII_LPA;
2374
2375         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
2377         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2378                 goto setup_phy;
2379
2380         bnx2_read_phy(bp, MII_PHYSID1, &val);
2381         bp->phy_id = val << 16;
2382         bnx2_read_phy(bp, MII_PHYSID2, &val);
2383         bp->phy_id |= val & 0xffff;
2384
2385         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2386                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2387                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2388                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2389                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2390                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2391                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2392         }
2393         else {
2394                 rc = bnx2_init_copper_phy(bp, reset_phy);
2395         }
2396
2397 setup_phy:
2398         if (!rc)
2399                 rc = bnx2_setup_phy(bp, bp->phy_port);
2400
2401         return rc;
2402 }
2403
2404 static int
2405 bnx2_set_mac_loopback(struct bnx2 *bp)
2406 {
2407         u32 mac_mode;
2408
2409         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413         bp->link_up = 1;
2414         return 0;
2415 }
2416
2417 static int bnx2_test_link(struct bnx2 *);
2418
2419 static int
2420 bnx2_set_phy_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423         int rc, i;
2424
2425         spin_lock_bh(&bp->phy_lock);
2426         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2427                             BMCR_SPEED1000);
2428         spin_unlock_bh(&bp->phy_lock);
2429         if (rc)
2430                 return rc;
2431
2432         for (i = 0; i < 10; i++) {
2433                 if (bnx2_test_link(bp) == 0)
2434                         break;
2435                 msleep(100);
2436         }
2437
2438         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2441                       BNX2_EMAC_MODE_25G_MODE);
2442
2443         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445         bp->link_up = 1;
2446         return 0;
2447 }
2448
/* Send a command to the bootcode firmware through the driver mailbox
 * and, when @ack is set, wait up to BNX2_FW_ACK_TIME_OUT_MS for the
 * firmware to acknowledge it.  Returns 0 on success, -EBUSY on
 * timeout (after notifying the firmware), -EIO on a bad status.
 * Sleeps, so must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        /* Tag the message with a fresh sequence number so the ack can
         * be matched to this request.
         */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages are best-effort; skip the error checks. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2494
/* Initialize the 5709 on-chip context memory: kick the hardware context
 * memory init, then program the host page table with the DMA address of
 * every pre-allocated context block, polling for each write to complete.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete the
 * memory init or a page-table write in time, -ENOMEM if a context block
 * was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll until the chip clears MEM_INIT (bounded to 10 x 2us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Blocks are allocated elsewhere; a missing one is fatal. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address split across DATA0/DATA1,
		 * then trigger the page table entry write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until WRITE_REQ self-clears (bounded to 10 x 5us). */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2542
/* Zero out the on-chip context memory for all 96 connection IDs on
 * non-5709 chips, using the CTX_VIRT_ADDR/CTX_PAGE_TBL window to map
 * each physical context page before writing it.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: virtual CIDs with bit 3 set
			 * map to a remapped physical CID.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context may span several physical pages; map and clear
		 * each one in turn.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2585
2586 static int
2587 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2588 {
2589         u16 *good_mbuf;
2590         u32 good_mbuf_cnt;
2591         u32 val;
2592
2593         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2594         if (good_mbuf == NULL) {
2595                 printk(KERN_ERR PFX "Failed to allocate memory in "
2596                                     "bnx2_alloc_bad_rbuf\n");
2597                 return -ENOMEM;
2598         }
2599
2600         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2601                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2602
2603         good_mbuf_cnt = 0;
2604
2605         /* Allocate a bunch of mbufs and save the good ones in an array. */
2606         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2607         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2608                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2609                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2610
2611                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2612
2613                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2614
2615                 /* The addresses with Bit 9 set are bad memory blocks. */
2616                 if (!(val & (1 << 9))) {
2617                         good_mbuf[good_mbuf_cnt] = (u16) val;
2618                         good_mbuf_cnt++;
2619                 }
2620
2621                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2622         }
2623
2624         /* Free the good ones back to the mbuf pool thus discarding
2625          * all the bad ones. */
2626         while (good_mbuf_cnt) {
2627                 good_mbuf_cnt--;
2628
2629                 val = good_mbuf[good_mbuf_cnt];
2630                 val = (val << 9) | val | 1;
2631
2632                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2633         }
2634         kfree(good_mbuf);
2635         return 0;
2636 }
2637
2638 static void
2639 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2640 {
2641         u32 val;
2642
2643         val = (mac_addr[0] << 8) | mac_addr[1];
2644
2645         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2646
2647         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2648                 (mac_addr[4] << 8) | mac_addr[5];
2649
2650         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2651 }
2652
/* Allocate and DMA-map a fresh page for one slot of the rx page ring,
 * and point the corresponding rx buffer descriptor at it.
 *
 * Returns 0 on success, -ENOMEM if no page is available, -EIO if the
 * DMA mapping fails (the page is freed before returning).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	/* GFP_ATOMIC: may be called from the NAPI poll path. */
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* The 64-bit DMA address is split across the hi/lo BD words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2677
2678 static void
2679 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2680 {
2681         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2682         struct page *page = rx_pg->page;
2683
2684         if (!page)
2685                 return;
2686
2687         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2688                        PCI_DMA_FROMDEVICE);
2689
2690         __free_page(page);
2691         rx_pg->page = NULL;
2692 }
2693
/* Allocate, align, and DMA-map a new skb for one slot of the rx buffer
 * ring, program the buffer descriptor with its DMA address, and advance
 * the producer byte-sequence counter.
 *
 * Returns 0 on success, -ENOMEM if skb allocation fails, -EIO if the
 * DMA mapping fails (the skb is freed before returning).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round skb->data up to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Byte sequence the chip uses to track how much buffer space the
	 * driver has posted.
	 */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2728
2729 static int
2730 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2731 {
2732         struct status_block *sblk = bnapi->status_blk.msi;
2733         u32 new_link_state, old_link_state;
2734         int is_set = 1;
2735
2736         new_link_state = sblk->status_attn_bits & event;
2737         old_link_state = sblk->status_attn_bits_ack & event;
2738         if (new_link_state != old_link_state) {
2739                 if (new_link_state)
2740                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2741                 else
2742                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2743         } else
2744                 is_set = 0;
2745
2746         return is_set;
2747 }
2748
/* Service PHY attention events from the NAPI poll path: re-evaluate the
 * link on a link-state event and the remote PHY state on a timer-abort
 * event.  phy_lock serializes against other PHY accessors.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2762
/* Read the hardware tx consumer index from the status block.
 * The index is bumped past a value whose low bits equal MAX_TX_DESC_CNT
 * (presumably the last BD of a ring page, which is not a data BD —
 * TODO confirm against the ring layout).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2776
/* Reclaim completed tx descriptors for this NAPI instance's tx ring,
 * up to @budget packets: unmap each completed skb and its fragments,
 * free it, and wake the tx queue if it was stopped and enough BDs are
 * now available.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi maps 1:1 to a netdev tx queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once every BD of the packet (head +
			 * nr_frags + 1) has completed; otherwise stop here
			 * and wait for the next completion.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment BD that followed the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up completions that arrived
		 * while we were reclaiming.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with the xmit path
	 * stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2867
/* Recycle @count pages from the rx page-ring consumer side back to the
 * producer side (reusing their existing DMA mappings) instead of
 * allocating fresh pages.  Used on error/no-memory paths.
 *
 * @skb: if non-NULL, its last page fragment is first detached and
 *	returned to the ring, then the skb is freed (see comment below).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* When prod == cons the page is already in place; only
		 * move page, mapping and BD address when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2923
/* Recycle an rx skb from the consumer slot back into the producer slot
 * (keeping its DMA mapping) when it cannot be passed up the stack.
 * Also hands the header area back to the device and advances the
 * producer byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give ownership of the (possibly CPU-synced) header bytes back
	 * to the device before reposting the buffer.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb, mapping and BD address are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2953
/* Finish building a received skb: repost a fresh buffer for the slot,
 * unmap the packet data, and — for jumbo/split packets (hdr_len != 0) —
 * attach the remaining data as page fragments from the rx page ring.
 *
 * @len: packet length excluding the 4-byte trailing CRC.
 * @hdr_len: 0 for linear packets; otherwise the number of bytes in the
 *	linear part, with the rest spread over page-ring pages.
 * @ring_idx: packed (cons << 16) | prod slot indices.
 *
 * Returns 0 on success; on allocation failure the buffers are recycled
 * via bnx2_reuse_rx_skb{,_pages}() and an error is returned.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* raw_len includes the 4-byte CRC still on the wire
			 * data; recycle every page the packet occupied.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The last <= 4 bytes are CRC only; trim them off
			 * the skb accounting, recycle the unused pages, and
			 * finish.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				/* skb is freed here; its last frag page is
				 * detached and recycled along with the rest.
				 */
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3052
/* Read the hardware rx consumer index from the status block.
 * The index is bumped past a value whose low bits equal MAX_RX_DESC_CNT
 * (presumably the last BD of a ring page, which is not a data BD —
 * TODO confirm against the ring layout).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3066
/* NAPI rx poll work: process up to @budget received packets from this
 * instance's rx ring.  Handles error frames, small-packet copy-break,
 * jumbo/split packets via the page ring, VLAN tag extraction, and
 * checksum offload flags, then reposts buffers to the chip.
 *
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area to the CPU; the chip
		 * prepends an l2_fhdr describing the frame.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		/* Split frames and frames above the jumbo threshold carry
		 * their payload tail in the page ring.
		 */
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling their buffers and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailing CRC from the reported length. */
		len -= 4;

		/* Copy-break: for small frames, copy into a fresh skb and
		 * recycle the original rx buffer in place.
		 */
		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN acceleration available: rebuild the
				 * 802.1Q header in the packet data instead.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they are VLAN-tagged
		 * (0x8100 = ETH_P_8021Q).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when the chip verified
		 * a TCP segment or UDP datagram without xsum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the buffers we reposted. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3242
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3265
/* One-shot MSI ISR: the hardware masks the interrupt itself, so unlike
 * bnx2_msi() no INT_ACK_CMD mask write is needed here.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3282
/* INTx (legacy, possibly shared) ISR: detect whether this device really
 * raised the line, ack/mask the interrupt, and schedule NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we saw only if we actually get to
	 * schedule the poll; otherwise a poll is already in flight.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3321
3322 static inline int
3323 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3324 {
3325         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3326         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3327
3328         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3329             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3330                 return 1;
3331         return 0;
3332 }
3333
3334 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3335                                  STATUS_ATTN_BITS_TIMER_ABORT)
3336
3337 static inline int
3338 bnx2_has_work(struct bnx2_napi *bnapi)
3339 {
3340         struct status_block *sblk = bnapi->status_blk.msi;
3341
3342         if (bnx2_has_fast_work(bnapi))
3343                 return 1;
3344
3345 #ifdef BCM_CNIC
3346         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3347                 return 1;
3348 #endif
3349
3350         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3351             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3352                 return 1;
3353
3354         return 0;
3355 }
3356
/* Detect a possibly missed MSI on vector 0: if work is pending but the
 * status index has not advanced since the previous check, the MSI was
 * likely lost -- bounce the MSI enable bit and invoke the handler
 * directly to restart processing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do unless MSI is actually enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI off and back on, then run the
			 * handler by hand to kick NAPI.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3378
#ifdef BCM_CNIC
/* Give the CNIC (offload) driver a chance to process its events for
 * this vector.  bp->cnic_ops is read under RCU because the cnic module
 * may be unregistered concurrently.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		/* cnic_tag is compared against status_idx in
		 * bnx2_has_work() to detect further cnic work.
		 */
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3395
/* Check for unacknowledged attention events (link state change or
 * timer abort) and service them.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An event is pending when its bit differs between the raw and
	 * acknowledged attention words.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3415
3416 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3417                           int work_done, int budget)
3418 {
3419         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3420         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3421
3422         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3423                 bnx2_tx_int(bp, bnapi, 0);
3424
3425         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3426                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3427
3428         return work_done;
3429 }
3430
/* NAPI poll routine for MSI-X ring vectors: only rx/tx ring work; link
 * and cnic events are handled by bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Re-enable the vector's interrupt and report the
			 * last status index processed.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3457
/* Default NAPI poll routine (vector 0): handles link/attention events,
 * rx/tx ring work, and (when built in) cnic events, then re-enables
 * interrupts once the budget is not exhausted and no work remains.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: one write re-enables the interrupt. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with the interrupt
			 * still masked, then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3506
/* Program the receive filters (promiscuous mode, multicast hash,
 * unicast match registers and the RPM sort configuration) from
 * dev->flags and the device's address lists.
 *
 * Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock serializes the rx-mode/sort register updates. */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More secondary unicast addresses than match slots: fall back
	 * to promiscuous reception.
	 */
	if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc.list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, load the new sort mode, then re-enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3602
3603 static int __devinit
3604 check_fw_section(const struct firmware *fw,
3605                  const struct bnx2_fw_file_section *section,
3606                  u32 alignment, bool non_empty)
3607 {
3608         u32 offset = be32_to_cpu(section->offset);
3609         u32 len = be32_to_cpu(section->len);
3610
3611         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3612                 return -EINVAL;
3613         if ((non_empty && len == 0) || len > fw->size - offset ||
3614             len & (alignment - 1))
3615                 return -EINVAL;
3616         return 0;
3617 }
3618
3619 static int __devinit
3620 check_mips_fw_entry(const struct firmware *fw,
3621                     const struct bnx2_mips_fw_file_entry *entry)
3622 {
3623         if (check_fw_section(fw, &entry->text, 4, true) ||
3624             check_fw_section(fw, &entry->data, 4, false) ||
3625             check_fw_section(fw, &entry->rodata, 4, false))
3626                 return -EINVAL;
3627         return 0;
3628 }
3629
/* Request and sanity-check the MIPS and RV2P firmware images for this
 * chip (5709 vs. 5706/5708, with a separate RV2P variant for 5709 A0/A1
 * silicon).  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the failure paths, firmware already requested is not
 * released here -- presumably the caller's error/remove path releases
 * bp->mips_firmware / bp->rv2p_firmware; verify against the probe code.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image names for this chip revision. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       rv2p_fw_file);
		return rc;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	/* Validate every section header before anything is downloaded. */
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3685
3686 static u32
3687 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3688 {
3689         switch (idx) {
3690         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3691                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3692                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3693                 break;
3694         }
3695         return rv2p_code;
3696 }
3697
/* Download one RV2P processor image.  Each 64-bit instruction is staged
 * through the INSTR_HIGH/INSTR_LOW registers and committed with a write
 * to the processor's address/command register; the fixup table then
 * patches selected instruction words in place.  The processor is left
 * in reset; it is un-stalled later.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command value and register for this processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Stream the image, 8 bytes (one instruction) per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply the fixup table: rewrite individual instruction words. */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3757
3758 static int
3759 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3760             const struct bnx2_mips_fw_file_entry *fw_entry)
3761 {
3762         u32 addr, len, file_offset;
3763         __be32 *data;
3764         u32 offset;
3765         u32 val;
3766
3767         /* Halt the CPU. */
3768         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3769         val |= cpu_reg->mode_value_halt;
3770         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3771         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3772
3773         /* Load the Text area. */
3774         addr = be32_to_cpu(fw_entry->text.addr);
3775         len = be32_to_cpu(fw_entry->text.len);
3776         file_offset = be32_to_cpu(fw_entry->text.offset);
3777         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3778
3779         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3780         if (len) {
3781                 int j;
3782
3783                 for (j = 0; j < (len / 4); j++, offset += 4)
3784                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3785         }
3786
3787         /* Load the Data area. */
3788         addr = be32_to_cpu(fw_entry->data.addr);
3789         len = be32_to_cpu(fw_entry->data.len);
3790         file_offset = be32_to_cpu(fw_entry->data.offset);
3791         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3792
3793         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3794         if (len) {
3795                 int j;
3796
3797                 for (j = 0; j < (len / 4); j++, offset += 4)
3798                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3799         }
3800
3801         /* Load the Read-Only area. */
3802         addr = be32_to_cpu(fw_entry->rodata.addr);
3803         len = be32_to_cpu(fw_entry->rodata.len);
3804         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3805         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3806
3807         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3808         if (len) {
3809                 int j;
3810
3811                 for (j = 0; j < (len / 4); j++, offset += 4)
3812                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3813         }
3814
3815         /* Clear the pre-fetch instruction. */
3816         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3817
3818         val = be32_to_cpu(fw_entry->start_addr);
3819         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3820
3821         /* Start the CPU. */
3822         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3823         val &= ~cpu_reg->mode_value_halt;
3824         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3825         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3826
3827         return 0;
3828 }
3829
3830 static int
3831 bnx2_init_cpus(struct bnx2 *bp)
3832 {
3833         const struct bnx2_mips_fw_file *mips_fw =
3834                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3835         const struct bnx2_rv2p_fw_file *rv2p_fw =
3836                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3837         int rc;
3838
3839         /* Initialize the RV2P processor. */
3840         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3841         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3842
3843         /* Initialize the RX Processor. */
3844         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3845         if (rc)
3846                 goto init_cpu_err;
3847
3848         /* Initialize the TX Processor. */
3849         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3850         if (rc)
3851                 goto init_cpu_err;
3852
3853         /* Initialize the TX Patch-up Processor. */
3854         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3855         if (rc)
3856                 goto init_cpu_err;
3857
3858         /* Initialize the Completion Processor. */
3859         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3860         if (rc)
3861                 goto init_cpu_err;
3862
3863         /* Initialize the Command Processor. */
3864         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3865
3866 init_cpu_err:
3867         return rc;
3868 }
3869
/* Transition the chip between PCI power states D0 and D3hot.  Entering
 * D3hot optionally arms Wake-on-LAN: the copper PHY is renegotiated at
 * 10/100, the MAC is configured to recognize magic/ACPI packets, and
 * the bootcode is told which suspend mode is in effect.  Returns 0, or
 * -EINVAL for unsupported states (D1/D2 are not handled).
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	/* Current PM control/status word. */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits (back to D0) and ack any PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake packets and leave WOL mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the copper link at
			 * 10/100 for the low-power WOL link, restoring
			 * the user's settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode which suspend mode is in effect. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 stay in D0 unless WOL is armed --
		 * NOTE(review): presumably an early-silicon limitation;
		 * confirm against the chip errata.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;	/* state = D3hot */
		}
		else {
			pmcsr |= 3;		/* state = D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4007
4008 static int
4009 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4010 {
4011         u32 val;
4012         int j;
4013
4014         /* Request access to the flash interface. */
4015         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4016         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4017                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4018                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4019                         break;
4020
4021                 udelay(5);
4022         }
4023
4024         if (j >= NVRAM_TIMEOUT_COUNT)
4025                 return -EBUSY;
4026
4027         return 0;
4028 }
4029
4030 static int
4031 bnx2_release_nvram_lock(struct bnx2 *bp)
4032 {
4033         int j;
4034         u32 val;
4035
4036         /* Relinquish nvram interface. */
4037         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4038
4039         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4040                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4041                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4042                         break;
4043
4044                 udelay(5);
4045         }
4046
4047         if (j >= NVRAM_TIMEOUT_COUNT)
4048                 return -EBUSY;
4049
4050         return 0;
4051 }
4052
4053
4054 static int
4055 bnx2_enable_nvram_write(struct bnx2 *bp)
4056 {
4057         u32 val;
4058
4059         val = REG_RD(bp, BNX2_MISC_CFG);
4060         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4061
4062         if (bp->flash_info->flags & BNX2_NV_WREN) {
4063                 int j;
4064
4065                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4066                 REG_WR(bp, BNX2_NVM_COMMAND,
4067                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4068
4069                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4070                         udelay(5);
4071
4072                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4073                         if (val & BNX2_NVM_COMMAND_DONE)
4074                                 break;
4075                 }
4076
4077                 if (j >= NVRAM_TIMEOUT_COUNT)
4078                         return -EBUSY;
4079         }
4080         return 0;
4081 }
4082
4083 static void
4084 bnx2_disable_nvram_write(struct bnx2 *bp)
4085 {
4086         u32 val;
4087
4088         val = REG_RD(bp, BNX2_MISC_CFG);
4089         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4090 }
4091
4092
4093 static void
4094 bnx2_enable_nvram_access(struct bnx2 *bp)
4095 {
4096         u32 val;
4097
4098         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4099         /* Enable both bits, even on read. */
4100         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4101                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4102 }
4103
4104 static void
4105 bnx2_disable_nvram_access(struct bnx2 *bp)
4106 {
4107         u32 val;
4108
4109         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4110         /* Disable both bits, even after read. */
4111         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4112                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4113                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4114 }
4115
4116 static int
4117 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4118 {
4119         u32 cmd;
4120         int j;
4121
4122         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4123                 /* Buffered flash, no erase needed */
4124                 return 0;
4125
4126         /* Build an erase command */
4127         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4128               BNX2_NVM_COMMAND_DOIT;
4129
4130         /* Need to clear DONE bit separately. */
4131         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4132
4133         /* Address of the NVRAM to read from. */
4134         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4135
4136         /* Issue an erase command. */
4137         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4138
4139         /* Wait for completion. */
4140         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4141                 u32 val;
4142
4143                 udelay(5);
4144
4145                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4146                 if (val & BNX2_NVM_COMMAND_DONE)
4147                         break;
4148         }
4149
4150         if (j >= NVRAM_TIMEOUT_COUNT)
4151                 return -EBUSY;
4152
4153         return 0;
4154 }
4155
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored in
 * big-endian byte order).  @cmd_flags carries BNX2_NVM_COMMAND_FIRST/
 * LAST framing bits for multi-dword transactions.  The caller must
 * already hold the NVRAM lock and have access enabled.
 *
 * Returns 0 on success, -EBUSY if the read does not complete within
 * the NVRAM timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Map the linear offset to the page/column addressing the
		 * buffered part expects. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Latch the data register and copy it out in
			 * big-endian order via a bounce variable so the
			 * destination need not be aligned. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4199
4200
4201 static int
4202 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4203 {
4204         u32 cmd;
4205         __be32 val32;
4206         int j;
4207
4208         /* Build the command word. */
4209         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4210
4211         /* Calculate an offset of a buffered flash, not needed for 5709. */
4212         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4213                 offset = ((offset / bp->flash_info->page_size) <<
4214                           bp->flash_info->page_bits) +
4215                          (offset % bp->flash_info->page_size);
4216         }
4217
4218         /* Need to clear DONE bit separately. */
4219         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4220
4221         memcpy(&val32, val, 4);
4222
4223         /* Write the data. */
4224         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4225
4226         /* Address of the NVRAM to write to. */
4227         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4228
4229         /* Issue the write command. */
4230         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4231
4232         /* Wait for completion. */
4233         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4234                 udelay(5);
4235
4236                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4237                         break;
4238         }
4239         if (j >= NVRAM_TIMEOUT_COUNT)
4240                 return -EBUSY;
4241
4242         return 0;
4243 }
4244
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * On 5709 a fixed descriptor is used.  On older chips the NVM_CFG1
 * strapping bits are matched against flash_table[]; if the interface
 * has not been reconfigured yet, the matching entry's configuration
 * is programmed into the NVM_CFG registers under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if the part is unknown, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means the flash interface was already reconfigured
	 * (presumably by the bootcode) — TODO confirm against datasheet. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping field. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched the strapping. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory; fall back to the
	 * table entry's total size when shmem reports zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4327
4328 static int
4329 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4330                 int buf_size)
4331 {
4332         int rc = 0;
4333         u32 cmd_flags, offset32, len32, extra;
4334
4335         if (buf_size == 0)
4336                 return 0;
4337
4338         /* Request access to the flash interface. */
4339         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4340                 return rc;
4341
4342         /* Enable access to flash interface */
4343         bnx2_enable_nvram_access(bp);
4344
4345         len32 = buf_size;
4346         offset32 = offset;
4347         extra = 0;
4348
4349         cmd_flags = 0;
4350
4351         if (offset32 & 3) {
4352                 u8 buf[4];
4353                 u32 pre_len;
4354
4355                 offset32 &= ~3;
4356                 pre_len = 4 - (offset & 3);
4357
4358                 if (pre_len >= len32) {
4359                         pre_len = len32;
4360                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4361                                     BNX2_NVM_COMMAND_LAST;
4362                 }
4363                 else {
4364                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4365                 }
4366
4367                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4368
4369                 if (rc)
4370                         return rc;
4371
4372                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4373
4374                 offset32 += 4;
4375                 ret_buf += pre_len;
4376                 len32 -= pre_len;
4377         }
4378         if (len32 & 3) {
4379                 extra = 4 - (len32 & 3);
4380                 len32 = (len32 + 4) & ~3;
4381         }
4382
4383         if (len32 == 4) {
4384                 u8 buf[4];
4385
4386                 if (cmd_flags)
4387                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4388                 else
4389                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4390                                     BNX2_NVM_COMMAND_LAST;
4391
4392                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4393
4394                 memcpy(ret_buf, buf, 4 - extra);
4395         }
4396         else if (len32 > 0) {
4397                 u8 buf[4];
4398
4399                 /* Read the first word. */
4400                 if (cmd_flags)
4401                         cmd_flags = 0;
4402                 else
4403                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4404
4405                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4406
4407                 /* Advance to the next dword. */
4408                 offset32 += 4;
4409                 ret_buf += 4;
4410                 len32 -= 4;
4411
4412                 while (len32 > 4 && rc == 0) {
4413                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4414
4415                         /* Advance to the next dword. */
4416                         offset32 += 4;
4417                         ret_buf += 4;
4418                         len32 -= 4;
4419                 }
4420
4421                 if (rc)
4422                         return rc;
4423
4424                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4425                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4426
4427                 memcpy(ret_buf, buf, 4 - extra);
4428         }
4429
4430         /* Disable access to flash interface */
4431         bnx2_disable_nvram_access(bp);
4432
4433         bnx2_release_nvram_lock(bp);
4434
4435         return rc;
4436 }
4437
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 * Neither needs to be dword aligned: the surrounding bytes are first
 * read back so that whole aligned dwords can be written, and for
 * non-buffered flash each touched page is read into a scratch buffer,
 * erased, and rewritten with the merged contents.
 *
 * The NVRAM lock is taken and released once per page so other agents
 * (presumably the bootcode — TODO confirm) can interleave accesses.
 *
 * Returns 0 on success or a negative errno; a failure may leave the
 * current page partially rewritten.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword we are about to partially
	 * overwrite so its leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the final dword to preserve its trailing
	 * bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the preserved head/tail bytes with the caller's data in
	 * a dword-aligned bounce buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer to hold one full
	 * page during the read-erase-rewrite cycle (264 bytes covers
	 * the largest supported page). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4617
4618 static void
4619 bnx2_init_fw_cap(struct bnx2 *bp)
4620 {
4621         u32 val, sig = 0;
4622
4623         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4624         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4625
4626         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4627                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4628
4629         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4630         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4631                 return;
4632
4633         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4634                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4635                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4636         }
4637
4638         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4639             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4640                 u32 link;
4641
4642                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4643
4644                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4645                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4646                         bp->phy_port = PORT_FIBRE;
4647                 else
4648                         bp->phy_port = PORT_TP;
4649
4650                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4651                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4652         }
4653
4654         if (netif_running(bp->dev) && sig)
4655                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4656 }
4657
/* Program the GRC window registers so the MSI-X table and PBA are
 * reachable: switch to separate-window mode, then point window 2 at
 * the MSI-X table and window 3 at the pending-bit array.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4666
/* Perform a coordinated soft reset of the chip.
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware to
 *              describe why the reset is happening.
 *
 * Quiesces DMA, handshakes with the bootcode before and after the
 * reset, issues the chip-specific reset (MISC_COMMAND on 5709,
 * PCICFG_MISC_CONFIG on older parts, including 5706 A0/A1 errata
 * handling), verifies endian configuration, and refreshes firmware
 * capabilities.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; the
		 * read-back flushes the posted write. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register-window access after the reset. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if remote-PHY support changed
	 * the port type, reprogram the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset cleared the GRC windows; redo the MSI-X mapping. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4772
/* Bring the chip from post-reset state to operational: program DMA
 * and context configuration, load the internal CPUs' firmware, set
 * the MAC address, MTU and host-coalescing parameters, initialize
 * the receive filter, and finally tell the bootcode initialization
 * is complete.  Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA_CONFIG tuning bits — meaning
	 * not derivable from this file; confirm against the datasheet. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* 5706 A0 workaround: force single-DMA mode in TDMA. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* Configure the management queue kernel-bypass block size. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF sizing uses at least the standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status/statistics block and per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and
	 * statistics DMA blocks live. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Program interrupt-coalescing trip counts and tick timers. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing setup for the additional MSI-X vectors
	 * (vector 0 was configured above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake with the bootcode; rc is returned to the
	 * caller even though the enables below always run. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4997
4998 static void
4999 bnx2_clear_ring_states(struct bnx2 *bp)
5000 {
5001         struct bnx2_napi *bnapi;
5002         struct bnx2_tx_ring_info *txr;
5003         struct bnx2_rx_ring_info *rxr;
5004         int i;
5005
5006         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5007                 bnapi = &bp->bnx2_napi[i];
5008                 txr = &bnapi->tx_ring;
5009                 rxr = &bnapi->rx_ring;
5010
5011                 txr->tx_cons = 0;
5012                 txr->hw_tx_cons = 0;
5013                 rxr->rx_prod_bseq = 0;
5014                 rxr->rx_prod = 0;
5015                 rxr->rx_cons = 0;
5016                 rxr->rx_pg_prod = 0;
5017                 rxr->rx_pg_cons = 0;
5018         }
5019 }
5020
5021 static void
5022 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5023 {
5024         u32 val, offset0, offset1, offset2, offset3;
5025         u32 cid_addr = GET_CID_ADDR(cid);
5026
5027         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5028                 offset0 = BNX2_L2CTX_TYPE_XI;
5029                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5030                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5031                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5032         } else {
5033                 offset0 = BNX2_L2CTX_TYPE;
5034                 offset1 = BNX2_L2CTX_CMD_TYPE;
5035                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5036                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5037         }
5038         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5039         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5040
5041         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5042         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5043
5044         val = (u64) txr->tx_desc_mapping >> 32;
5045         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5046
5047         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5048         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5049 }
5050
5051 static void
5052 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5053 {
5054         struct tx_bd *txbd;
5055         u32 cid = TX_CID;
5056         struct bnx2_napi *bnapi;
5057         struct bnx2_tx_ring_info *txr;
5058
5059         bnapi = &bp->bnx2_napi[ring_num];
5060         txr = &bnapi->tx_ring;
5061
5062         if (ring_num == 0)
5063                 cid = TX_CID;
5064         else
5065                 cid = TX_TSS_CID + ring_num - 1;
5066
5067         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5068
5069         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5070
5071         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5072         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5073
5074         txr->tx_prod = 0;
5075         txr->tx_prod_bseq = 0;
5076
5077         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5078         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5079
5080         bnx2_init_tx_context(bp, cid, txr);
5081 }
5082
5083 static void
5084 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5085                      int num_rings)
5086 {
5087         int i;
5088         struct rx_bd *rxbd;
5089
5090         for (i = 0; i < num_rings; i++) {
5091                 int j;
5092
5093                 rxbd = &rx_ring[i][0];
5094                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5095                         rxbd->rx_bd_len = buf_size;
5096                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5097                 }
5098                 if (i == (num_rings - 1))
5099                         j = 0;
5100                 else
5101                         j = i + 1;
5102                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5103                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5104         }
5105 }
5106
/* Initialize one RX ring: program the chip's L2 RX context, set up the
 * BD pages (and optional jumbo page ring), pre-fill the rings with
 * buffers, and write the initial producer indices to the chip mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
        int i;
        u16 prod, ring_prod;
        u32 cid, rx_cid_addr, val;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* Ring 0 uses the base RX CID; extra RSS rings are offset. */
        if (ring_num == 0)
                cid = RX_CID;
        else
                cid = RX_RSS_CID + ring_num - 1;

        rx_cid_addr = GET_CID_ADDR(cid);

        bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context(bp, cid);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
                REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Default to no page ring; overwritten below when jumbo is on. */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
                                     rxr->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

                /* DMA address of the page BD ring, hi word then lo. */
                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* DMA address of the normal RX BD ring. */
        val = (u64) rxr->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early on allocation failure. */
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        rxr->rx_pg_prod = prod;

        /* Pre-fill the skb ring; stop early on allocation failure. */
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        rxr->rx_prod = prod;

        /* Mailbox addresses used to publish new producer indices. */
        rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
        rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
        rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

        /* Tell the chip how many buffers it may use. */
        REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
        REG_WR16(bp, rxr->rx_bidx_addr, prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5186
/* (Re)initialize every tx and rx ring and, when multiple rx rings are
 * in use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
        int i;
        u32 val;

        bnx2_clear_ring_states(bp);

        REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
        for (i = 0; i < bp->num_tx_rings; i++)
                bnx2_init_tx_ring(bp, i);

        /* Enable TSS when more than one tx ring is configured. */
        if (bp->num_tx_rings > 1)
                REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
                       (TX_TSS_CID << 7));

        REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
        bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

        for (i = 0; i < bp->num_rx_rings; i++)
                bnx2_init_rx_ring(bp, i);

        if (bp->num_rx_rings > 1) {
                u32 tbl_32;
                u8 *tbl = (u8 *) &tbl_32;

                bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
                                BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

                /* Fill the indirection table four byte-entries at a
                 * time, spreading flows over rings 0..num_rx_rings-2
                 * (ring entries are relative; the chip adds the base).
                 * The 32-bit word is flushed on every 4th entry.
                 */
                for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
                        tbl[i % 4] = i % (bp->num_rx_rings - 1);
                        if ((i % 4) == 3)
                                bnx2_reg_wr_ind(bp,
                                                BNX2_RXP_SCRATCH_RSS_TBL + i,
                                                cpu_to_be32(tbl_32));
                }

                val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
                      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

                REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

        }
}
5231
5232 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5233 {
5234         u32 max, num_rings = 1;
5235
5236         while (ring_size > MAX_RX_DESC_CNT) {
5237                 ring_size -= MAX_RX_DESC_CNT;
5238                 num_rings++;
5239         }
5240         /* round to next power of 2 */
5241         max = max_size;
5242         while ((max & num_rings) == 0)
5243                 max >>= 1;
5244
5245         if (num_rings != max)
5246                 max <<= 1;
5247
5248         return max;
5249 }
5250
5251 static void
5252 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5253 {
5254         u32 rx_size, rx_space, jumbo_size;
5255
5256         /* 8 for CRC and VLAN */
5257         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5258
5259         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5260                 sizeof(struct skb_shared_info);
5261
5262         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5263         bp->rx_pg_ring_size = 0;
5264         bp->rx_max_pg_ring = 0;
5265         bp->rx_max_pg_ring_idx = 0;
5266         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5267                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5268
5269                 jumbo_size = size * pages;
5270                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5271                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5272
5273                 bp->rx_pg_ring_size = jumbo_size;
5274                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5275                                                         MAX_RX_PG_RINGS);
5276                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5277                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5278                 bp->rx_copy_thresh = 0;
5279         }
5280
5281         bp->rx_buf_use_size = rx_size;
5282         /* hw alignment */
5283         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5284         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5285         bp->rx_ring_size = size;
5286         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5287         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5288 }
5289
/* Unmap and free every skb still queued on the tx rings.  Called with
 * the chip quiesced, so no tx completion can race with this walk.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
                int j;

                /* Ring was never allocated -- nothing to free here. */
                if (txr->tx_buf_ring == NULL)
                        continue;

                /* j advances inside the loop: by 1 for empty slots, by
                 * 1 + nr_frags for each packet (head BD + frag BDs).
                 */
                for (j = 0; j < TX_DESC_CNT; ) {
                        struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb = tx_buf->skb;
                        int k, last;

                        if (skb == NULL) {
                                j++;
                                continue;
                        }

                        pci_unmap_single(bp->pdev,
                                         pci_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);

                        tx_buf->skb = NULL;

                        /* Unmap each fragment BD that follows the head. */
                        last = tx_buf->nr_frags;
                        j++;
                        for (k = 0; k < last; k++, j++) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                pci_unmap_page(bp->pdev,
                                        pci_unmap_addr(tx_buf, mapping),
                                        skb_shinfo(skb)->frags[k].size,
                                        PCI_DMA_TODEVICE);
                        }
                        dev_kfree_skb(skb);
                }
        }
}
5333
5334 static void
5335 bnx2_free_rx_skbs(struct bnx2 *bp)
5336 {
5337         int i;
5338
5339         for (i = 0; i < bp->num_rx_rings; i++) {
5340                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5341                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5342                 int j;
5343
5344                 if (rxr->rx_buf_ring == NULL)
5345                         return;
5346
5347                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5348                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5349                         struct sk_buff *skb = rx_buf->skb;
5350
5351                         if (skb == NULL)
5352                                 continue;
5353
5354                         pci_unmap_single(bp->pdev,
5355                                          pci_unmap_addr(rx_buf, mapping),
5356                                          bp->rx_buf_use_size,
5357                                          PCI_DMA_FROMDEVICE);
5358
5359                         rx_buf->skb = NULL;
5360
5361                         dev_kfree_skb(skb);
5362                 }
5363                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5364                         bnx2_free_rx_page(bp, rxr, j);
5365         }
5366 }
5367
/* Free all buffers held by the driver (chip must already be quiesced). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
5374
5375 static int
5376 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5377 {
5378         int rc;
5379
5380         rc = bnx2_reset_chip(bp, reset_code);
5381         bnx2_free_skbs(bp);
5382         if (rc)
5383                 return rc;
5384
5385         if ((rc = bnx2_init_chip(bp)) != 0)
5386                 return rc;
5387
5388         bnx2_init_all_rings(bp);
5389         return 0;
5390 }
5391
/* Full NIC bring-up: reset chip and rings, then (re)initialize the PHY
 * and link state.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
        int rc;

        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;

        /* PHY/link setup is serialized against other phy users by
         * phy_lock (taken _bh since phy state is touched in softirq).
         */
        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp, reset_phy);
        bnx2_set_link(bp);
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                bnx2_remote_phy_event(bp);
        spin_unlock_bh(&bp->phy_lock);
        return 0;
}
5408
5409 static int
5410 bnx2_shutdown_chip(struct bnx2 *bp)
5411 {
5412         u32 reset_code;
5413
5414         if (bp->flags & BNX2_FLAG_NO_WOL)
5415                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5416         else if (bp->wol)
5417                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5418         else
5419                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5420
5421         return bnx2_reset_chip(bp, reset_code);
5422 }
5423
/* Ethtool register self-test: for each table entry, verify that the
 * read/write bits (rw_mask) can be cleared and set and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* Sentinel: offset 0xffff terminates the table. */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                /* Some registers do not exist on the 5709. */
                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write all-zeros: rw bits must read back 0, ro bits
                 * must be unchanged.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-ones: rw bits must read back 1, ro bits
                 * must still be unchanged.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the register before reporting the failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
5594
5595 static int
5596 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5597 {
5598         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5599                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5600         int i;
5601
5602         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5603                 u32 offset;
5604
5605                 for (offset = 0; offset < size; offset += 4) {
5606
5607                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5608
5609                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5610                                 test_pattern[i]) {
5611                                 return -ENODEV;
5612                         }
5613                 }
5614         }
5615         return 0;
5616 }
5617
5618 static int
5619 bnx2_test_memory(struct bnx2 *bp)
5620 {
5621         int ret = 0;
5622         int i;
5623         static struct mem_entry {
5624                 u32   offset;
5625                 u32   len;
5626         } mem_tbl_5706[] = {
5627                 { 0x60000,  0x4000 },
5628                 { 0xa0000,  0x3000 },
5629                 { 0xe0000,  0x4000 },
5630                 { 0x120000, 0x4000 },
5631                 { 0x1a0000, 0x4000 },
5632                 { 0x160000, 0x4000 },
5633                 { 0xffffffff, 0    },
5634         },
5635         mem_tbl_5709[] = {
5636                 { 0x60000,  0x4000 },
5637                 { 0xa0000,  0x3000 },
5638                 { 0xe0000,  0x4000 },
5639                 { 0x120000, 0x4000 },
5640                 { 0x1a0000, 0x4000 },
5641                 { 0xffffffff, 0    },
5642         };
5643         struct mem_entry *mem_tbl;
5644
5645         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5646                 mem_tbl = mem_tbl_5709;
5647         else
5648                 mem_tbl = mem_tbl_5706;
5649
5650         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5651                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5652                         mem_tbl[i].len)) != 0) {
5653                         return ret;
5654                 }
5655         }
5656
5657         return ret;
5658 }
5659
5660 #define BNX2_MAC_LOOPBACK       0
5661 #define BNX2_PHY_LOOPBACK       1
5662
/* Send one self-addressed test frame through the requested loopback
 * path (MAC-internal or PHY) and verify it is received intact: correct
 * length, no error flags, and a byte-for-byte payload match.
 * Returns 0 on success, -ENODEV on any mismatch, -EINVAL for an
 * unknown mode, -ENOMEM/-EIO on allocation/mapping failure.
 * Remote-PHY configurations skip the PHY loopback (returns 0).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* Loopback test always uses vector 0's rings. */
        tx_napi = bnapi;

        txr = &tx_napi->tx_ring;
        rxr = &bnapi->rx_ring;
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a frame addressed to ourselves with a counting payload,
         * capped so it still fits in one rx buffer.
         */
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(bp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        /* Force a status block update and note the rx consumer index
         * before sending, so we can tell if our packet arrived.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

        num_pkts = 0;

        /* Queue the single tx BD and ring the doorbell. */
        txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
        txr->tx_prod_bseq += pkt_size;

        REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        /* Give the frame time to loop back, then force another status
         * block update to pick up the tx/rx completions.
         */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The tx BD must have been consumed ... */
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
                goto loopback_test_done;

        /* ... and exactly num_pkts packets must have been received. */
        rx_idx = bnx2_get_hw_rx_cons(bnapi);
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        /* NOTE(review): indexes with the raw consumer value rather than
         * RX_RING_IDX(rx_start_idx) -- presumably safe because the test
         * runs right after a reset with low indices; confirm.
         */
        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, BNX2_RX_OFFSET);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame if the chip flagged any rx error on it. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: received length minus 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Payload must match the counting pattern we transmitted. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
5794
/* Bitmask results returned by bnx2_test_loopback(). */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5799
5800 static int
5801 bnx2_test_loopback(struct bnx2 *bp)
5802 {
5803         int rc = 0;
5804
5805         if (!netif_running(bp->dev))
5806                 return BNX2_LOOPBACK_FAILED;
5807
5808         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5809         spin_lock_bh(&bp->phy_lock);
5810         bnx2_init_phy(bp, 1);
5811         spin_unlock_bh(&bp->phy_lock);
5812         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5813                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5814         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5815                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5816         return rc;
5817 }
5818
/* Number of bytes of NVRAM verified by bnx2_test_nvram(). */
#define NVRAM_SIZE 0x200
/* CRC32 residual expected when a block that includes its own CRC is intact. */
#define CRC32_RESIDUAL 0xdebb20e3
5821
5822 static int
5823 bnx2_test_nvram(struct bnx2 *bp)
5824 {
5825         __be32 buf[NVRAM_SIZE / 4];
5826         u8 *data = (u8 *) buf;
5827         int rc = 0;
5828         u32 magic, csum;
5829
5830         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5831                 goto test_nvram_done;
5832
5833         magic = be32_to_cpu(buf[0]);
5834         if (magic != 0x669955aa) {
5835                 rc = -ENODEV;
5836                 goto test_nvram_done;
5837         }
5838
5839         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5840                 goto test_nvram_done;
5841
5842         csum = ether_crc_le(0x100, data);
5843         if (csum != CRC32_RESIDUAL) {
5844                 rc = -ENODEV;
5845                 goto test_nvram_done;
5846         }
5847
5848         csum = ether_crc_le(0x100, data + 0x100);
5849         if (csum != CRC32_RESIDUAL) {
5850                 rc = -ENODEV;
5851         }
5852
5853 test_nvram_done:
5854         return rc;
5855 }
5856
5857 static int
5858 bnx2_test_link(struct bnx2 *bp)
5859 {
5860         u32 bmsr;
5861
5862         if (!netif_running(bp->dev))
5863                 return -ENODEV;
5864
5865         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5866                 if (bp->link_up)
5867                         return 0;
5868                 return -ENODEV;
5869         }
5870         spin_lock_bh(&bp->phy_lock);
5871         bnx2_enable_bmsr1(bp);
5872         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5873         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874         bnx2_disable_bmsr1(bp);
5875         spin_unlock_bh(&bp->phy_lock);
5876
5877         if (bmsr & BMSR_LSTATUS) {
5878                 return 0;
5879         }
5880         return -ENODEV;
5881 }
5882
5883 static int
5884 bnx2_test_intr(struct bnx2 *bp)
5885 {
5886         int i;
5887         u16 status_idx;
5888
5889         if (!netif_running(bp->dev))
5890                 return -ENODEV;
5891
5892         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5893
5894         /* This register is not touched during run-time. */
5895         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5896         REG_RD(bp, BNX2_HC_COMMAND);
5897
5898         for (i = 0; i < 10; i++) {
5899                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5900                         status_idx) {
5901
5902                         break;
5903                 }
5904
5905                 msleep_interruptible(10);
5906         }
5907         if (i < 10)
5908                 return 0;
5909
5910         return -ENODEV;
5911 }
5912
/* Determining link for parallel detection (5706 SerDes).
 * Returns 1 when the PHY shows signal detect, stable sync, and is not
 * receiving autoneg CONFIG words — i.e. a non-autoneg partner appears
 * to have link; returns 0 otherwise.
 * Caller holds bp->phy_lock (called from bnx2_5706_serdes_timer()).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* No signal detect -> no link. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Double read — presumably to discard a latched value; confirm
         * with the PHY documentation.
         */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5944
/* Per-tick SerDes link maintenance for 5706 parts.  Called from
 * bnx2_timer() (softirq context), hence plain spin_lock on phy_lock.
 * Handles parallel detection: forces 1G/full when the partner does not
 * autoneg, and reverts to autoneg when the partner starts doing so.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* Autoneg recently restarted; skip checks this tick. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Partner may not autoneg: if parallel detection
                         * sees link, force 1G full duplex.
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* NOTE(review): 0x17/0x15 look like vendor expansion
                 * registers; exact semantics not visible here — confirm
                 * against the PHY datasheet.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Partner now autonegs: re-enable autoneg and
                         * drop out of parallel-detect mode.
                         */
                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read — presumably to discard the latched value;
                 * confirm with the PHY documentation.
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link reported up but PHY lost sync: force the
                         * link down once, then let bnx2_set_link() rerun.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
6006
/* Per-tick SerDes link maintenance for 5708 parts.  While the link is
 * down with autoneg requested, alternates between forced 2.5G and
 * autoneg mode.  Called from bnx2_timer() (softirq context), hence
 * plain spin_lock on phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Remote PHY: firmware manages the link; nothing to do. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg isn't linking: try forced 2.5G with a
                         * shorter retry interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced mode didn't link either: go back to
                         * autoneg and give it two ticks before retrying.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
6039
/* Periodic driver timer.  Sends the firmware heartbeat, refreshes the
 * firmware rx-drop counter, and drives the SerDes link state machines;
 * re-arms itself every bp->current_interval jiffies while the device
 * is running.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* intr_sem is non-zero while a reset is in flight (see
         * bnx2_reset_task()); skip the work but keep the timer alive.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6075
6076 static int
6077 bnx2_request_irq(struct bnx2 *bp)
6078 {
6079         unsigned long flags;
6080         struct bnx2_irq *irq;
6081         int rc = 0, i;
6082
6083         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6084                 flags = 0;
6085         else
6086                 flags = IRQF_SHARED;
6087
6088         for (i = 0; i < bp->irq_nvecs; i++) {
6089                 irq = &bp->irq_tbl[i];
6090                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6091                                  &bp->bnx2_napi[i]);
6092                 if (rc)
6093                         break;
6094                 irq->requested = 1;
6095         }
6096         return rc;
6097 }
6098
6099 static void
6100 bnx2_free_irq(struct bnx2 *bp)
6101 {
6102         struct bnx2_irq *irq;
6103         int i;
6104
6105         for (i = 0; i < bp->irq_nvecs; i++) {
6106                 irq = &bp->irq_tbl[i];
6107                 if (irq->requested)
6108                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6109                 irq->requested = 0;
6110         }
6111         if (bp->flags & BNX2_FLAG_USING_MSI)
6112                 pci_disable_msi(bp->pdev);
6113         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6114                 pci_disable_msix(bp->pdev);
6115
6116         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6117 }
6118
/* Try to switch the device to MSI-X with @msix_vecs vectors.  On
 * success sets BNX2_FLAG_USING_MSIX/ONE_SHOT_MSI and fills bp->irq_tbl;
 * on failure returns silently, leaving the INTx defaults from
 * bnx2_setup_int_mode() in place.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        /* Point the chip's MSI-X table and PBA at the GRC windows. */
        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        /* All-or-nothing: pci_enable_msix() != 0 means keep INTx. */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bp->irq_tbl[i].vector = msix_ent[i].vector;
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
}
6149
/* Select the interrupt mode (MSI-X, MSI, or INTx) and populate
 * bp->irq_tbl.  @dis_msi forces legacy INTx.  Also derives the tx/rx
 * ring counts from the number of vectors obtained.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* Default: a single legacy INTx vector. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to single MSI if MSI-X was unavailable or skipped. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* Round tx ring count down to a power of two — presumably for
         * the queue-mapping arithmetic; confirm against callers.
         */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
6183
/* Called with rtnl_lock */
/* ndo_open: power up, allocate rings and IRQs, initialize the chip,
 * and start the timer and tx queues.  If MSI is in use, verifies that
 * an MSI interrupt actually arrives and falls back to INTx otherwise.
 * On failure, unwinds via open_err and returns the error.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        /* Clear intr_sem (see bnx2_timer) before enabling interrupts. */
        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* dis_msi=1: reconfigure for legacy INTx. */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                /* Timer was already armed above. */
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
6260
/* Workqueue handler scheduled by bnx2_tx_timeout().  Takes rtnl_lock
 * so the reset cannot race with bnx2_open()/bnx2_close() or other
 * rtnl-protected reconfiguration, and bails out if the device was
 * closed before the work ran.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        rtnl_lock();
        if (!netif_running(bp->dev)) {
                rtnl_unlock();
                return;
        }

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        /* NOTE(review): intr_sem appears to be released inside
         * bnx2_netif_start() — confirm; bnx2_timer() skips work while
         * it is non-zero.
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        rtnl_unlock();
}
6280
/* ndo_tx_timeout: defer the chip reset to process context via the
 * reset_task work item (bnx2_reset_task), which takes rtnl_lock.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
6289
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install a new VLAN group.  Quiesces the device while the pointer is
 * swapped, then reprograms the rx mode and notifies firmware.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (netif_running(dev))
                bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;

        /* Device down: nothing was stopped above, and the new group
         * takes effect on the next bnx2_open().
         */
        if (!netif_running(dev))
                return;

        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
#endif
6312
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb head and fragments for DMA, fill one tx
 * buffer descriptor per piece, and ring the doorbell registers.  On a
 * DMA mapping failure mid-frags, unmaps everything and drops the skb
 * (returning NETDEV_TX_OK, as the skb has been consumed).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Should not happen: the queue is stopped before the ring can
         * fill (see the MAX_SKB_FRAGS check at the bottom).
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        /* VLAN tag rides in the upper 16 bits of the flags word. */
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        /* TSO setup: encode MSS, TCP option length and (for IPv6) the
         * transport header offset into the BD flag/mss fields.
         */
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* Offset is split across three BD fields
                                 * in units of 8 bytes.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Head fragment: carries the skb pointer and the START flag. */
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(bp->pdev, mapping))
                        goto dma_error;
                pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Last BD of the chain gets the END flag. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        /* Stop the queue when a max-frag skb might not fit; re-wake if
         * completions freed enough room in the meantime.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
dma_error:
        /* save value of frag that failed */
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
                pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
6480
/* Called with rtnl_lock */
/* ndo_stop: flush any pending reset work, quiesce interrupts, NAPI and
 * the timer, shut down the chip, free all resources, and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Ensure a queued bnx2_reset_task cannot run during/after teardown. */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6501
/* Fold a hi/lo 64-bit counter pair from the statistics block into an
 * unsigned long; 32-bit builds use only the low word.
 */
#define GET_NET_STATS64(ctr)                                    \
        (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
        (unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6514
/* ndo_get_stats: translate the hardware statistics block into
 * struct net_device_stats.  Returns the (unupdated) dev->stats if the
 * statistics block has not been allocated.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &dev->stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) (stats_blk->stat_IfInFTQDiscards +
                stats_blk->stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors reported as 0 on 5706 and 5708 A0 —
         * presumably a chip erratum; confirm against errata docs.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInFTQDiscards +
                stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);

        return net_stats;
}
6591
6592 /* All ethtool functions called with rtnl_lock */
6593
/* ethtool get_settings: report supported/advertised modes and the
 * current speed/duplex.  Speed/duplex read -1 when the carrier is down.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* A remote-PHY-capable device may switch media, so advertise both. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* Snapshot link parameters consistently under phy_lock. */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* No link: speed/duplex unknown. */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6652
/* ethtool .set_settings handler.  Validates the requested
 * port/speed/duplex/autoneg combination and stores it in the bnx2
 * private struct; if the interface is running, applies it to the PHY
 * immediately.  Called under rtnl_lock by the ethtool core; takes
 * phy_lock around all state access.  Returns 0 on success or -EINVAL
 * for an unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed to bp until all
	 * validation has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching to the other port type is only possible with
	 * remote-PHY capable firmware.
	 */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G is fibre-only and needs a capable PHY. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No recognized single-speed request: advertise
			 * everything the selected port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex, and
			 * 2.5G only on a capable PHY.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the new configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6747
6748 static void
6749 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6750 {
6751         struct bnx2 *bp = netdev_priv(dev);
6752
6753         strcpy(info->driver, DRV_MODULE_NAME);
6754         strcpy(info->version, DRV_MODULE_VERSION);
6755         strcpy(info->bus_info, pci_name(bp->pdev));
6756         strcpy(info->fw_version, bp->fw_version);
6757 }
6758
/* Size of the ethtool register dump window, in bytes. */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool .get_regs_len handler: the dump size is fixed and does not
 * depend on the device instance.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6766
6767 static void
6768 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6769 {
6770         u32 *p = _p, i, offset;
6771         u8 *orig_p = _p;
6772         struct bnx2 *bp = netdev_priv(dev);
6773         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6774                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6775                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6776                                  0x1040, 0x1048, 0x1080, 0x10a4,
6777                                  0x1400, 0x1490, 0x1498, 0x14f0,
6778                                  0x1500, 0x155c, 0x1580, 0x15dc,
6779                                  0x1600, 0x1658, 0x1680, 0x16d8,
6780                                  0x1800, 0x1820, 0x1840, 0x1854,
6781                                  0x1880, 0x1894, 0x1900, 0x1984,
6782                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6783                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6784                                  0x2000, 0x2030, 0x23c0, 0x2400,
6785                                  0x2800, 0x2820, 0x2830, 0x2850,
6786                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6787                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6788                                  0x4080, 0x4090, 0x43c0, 0x4458,
6789                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6790                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6791                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6792                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6793                                  0x6800, 0x6848, 0x684c, 0x6860,
6794                                  0x6888, 0x6910, 0x8000 };
6795
6796         regs->version = 0;
6797
6798         memset(p, 0, BNX2_REGDUMP_LEN);
6799
6800         if (!netif_running(bp->dev))
6801                 return;
6802
6803         i = 0;
6804         offset = reg_boundaries[0];
6805         p += offset;
6806         while (offset < BNX2_REGDUMP_LEN) {
6807                 *p++ = REG_RD(bp, offset);
6808                 offset += 4;
6809                 if (offset == reg_boundaries[i + 1]) {
6810                         offset = reg_boundaries[i + 2];
6811                         p = (u32 *) (orig_p + offset);
6812                         i += 2;
6813                 }
6814         }
6815 }
6816
6817 static void
6818 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6819 {
6820         struct bnx2 *bp = netdev_priv(dev);
6821
6822         if (bp->flags & BNX2_FLAG_NO_WOL) {
6823                 wol->supported = 0;
6824                 wol->wolopts = 0;
6825         }
6826         else {
6827                 wol->supported = WAKE_MAGIC;
6828                 if (bp->wol)
6829                         wol->wolopts = WAKE_MAGIC;
6830                 else
6831                         wol->wolopts = 0;
6832         }
6833         memset(&wol->sopass, 0, sizeof(wol->sopass));
6834 }
6835
6836 static int
6837 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6838 {
6839         struct bnx2 *bp = netdev_priv(dev);
6840
6841         if (wol->wolopts & ~WAKE_MAGIC)
6842                 return -EINVAL;
6843
6844         if (wol->wolopts & WAKE_MAGIC) {
6845                 if (bp->flags & BNX2_FLAG_NO_WOL)
6846                         return -EINVAL;
6847
6848                 bp->wol = 1;
6849         }
6850         else {
6851                 bp->wol = 0;
6852         }
6853         return 0;
6854 }
6855
/* ethtool .nway_reset handler: restart link autonegotiation.
 * Valid only while the interface is up (-EAGAIN) and autoneg is
 * enabled (-EINVAL).  Called under rtnl_lock; takes phy_lock around
 * PHY accesses.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With remote-PHY firmware, ask the firmware to redo the link
	 * setup instead of touching the PHY directly.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the spinlock while sleeping; it is re-taken
		 * before the serdes timer state is updated.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6901
6902 static u32
6903 bnx2_get_link(struct net_device *dev)
6904 {
6905         struct bnx2 *bp = netdev_priv(dev);
6906
6907         return bp->link_up;
6908 }
6909
6910 static int
6911 bnx2_get_eeprom_len(struct net_device *dev)
6912 {
6913         struct bnx2 *bp = netdev_priv(dev);
6914
6915         if (bp->flash_info == NULL)
6916                 return 0;
6917
6918         return (int) bp->flash_size;
6919 }
6920
6921 static int
6922 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6923                 u8 *eebuf)
6924 {
6925         struct bnx2 *bp = netdev_priv(dev);
6926         int rc;
6927
6928         if (!netif_running(dev))
6929                 return -EAGAIN;
6930
6931         /* parameters already validated in ethtool_get_eeprom */
6932
6933         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6934
6935         return rc;
6936 }
6937
6938 static int
6939 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6940                 u8 *eebuf)
6941 {
6942         struct bnx2 *bp = netdev_priv(dev);
6943         int rc;
6944
6945         if (!netif_running(dev))
6946                 return -EAGAIN;
6947
6948         /* parameters already validated in ethtool_set_eeprom */
6949
6950         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6951
6952         return rc;
6953 }
6954
6955 static int
6956 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6957 {
6958         struct bnx2 *bp = netdev_priv(dev);
6959
6960         memset(coal, 0, sizeof(struct ethtool_coalesce));
6961
6962         coal->rx_coalesce_usecs = bp->rx_ticks;
6963         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6964         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6965         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6966
6967         coal->tx_coalesce_usecs = bp->tx_ticks;
6968         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6969         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6970         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6971
6972         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6973
6974         return 0;
6975 }
6976
6977 static int
6978 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6979 {
6980         struct bnx2 *bp = netdev_priv(dev);
6981
6982         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6983         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6984
6985         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6986         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6987
6988         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6989         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6990
6991         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6992         if (bp->rx_quick_cons_trip_int > 0xff)
6993                 bp->rx_quick_cons_trip_int = 0xff;
6994
6995         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6996         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6997
6998         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6999         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7000
7001         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7002         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7003
7004         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7005         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7006                 0xff;
7007
7008         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7009         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7010                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7011                         bp->stats_ticks = USEC_PER_SEC;
7012         }
7013         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7014                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7015         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7016
7017         if (netif_running(bp->dev)) {
7018                 bnx2_netif_stop(bp);
7019                 bnx2_init_nic(bp, 0);
7020                 bnx2_netif_start(bp);
7021         }
7022
7023         return 0;
7024 }
7025
7026 static void
7027 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7028 {
7029         struct bnx2 *bp = netdev_priv(dev);
7030
7031         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7032         ering->rx_mini_max_pending = 0;
7033         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7034
7035         ering->rx_pending = bp->rx_ring_size;
7036         ering->rx_mini_pending = 0;
7037         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7038
7039         ering->tx_max_pending = MAX_TX_DESC_CNT;
7040         ering->tx_pending = bp->tx_ring_size;
7041 }
7042
/* Resize the rx/tx rings.  If the interface is running, the chip is
 * reset and all skbs and descriptor memory freed first; memory is
 * then re-allocated and the NIC re-initialized with the new sizes.
 * On allocation/init failure the device is closed and the error
 * returned.  Caller holds rtnl_lock (reached via ethtool).
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; they are used by bnx2_alloc_mem() and
	 * bnx2_init_nic() below (or at the next open if down).
	 */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI before dev_close() —
			 * presumably undoing bnx2_netif_stop(); confirm
			 * against that helper.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
		bnx2_netif_start(bp);
	}
	return 0;
}
7072
7073 static int
7074 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7075 {
7076         struct bnx2 *bp = netdev_priv(dev);
7077         int rc;
7078
7079         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7080                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7081                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7082
7083                 return -EINVAL;
7084         }
7085         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7086         return rc;
7087 }
7088
7089 static void
7090 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7091 {
7092         struct bnx2 *bp = netdev_priv(dev);
7093
7094         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7095         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7096         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7097 }
7098
7099 static int
7100 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7101 {
7102         struct bnx2 *bp = netdev_priv(dev);
7103
7104         bp->req_flow_ctrl = 0;
7105         if (epause->rx_pause)
7106                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7107         if (epause->tx_pause)
7108                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7109
7110         if (epause->autoneg) {
7111                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7112         }
7113         else {
7114                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7115         }
7116
7117         if (netif_running(dev)) {
7118                 spin_lock_bh(&bp->phy_lock);
7119                 bnx2_setup_phy(bp, bp->phy_port);
7120                 spin_unlock_bh(&bp->phy_lock);
7121         }
7122
7123         return 0;
7124 }
7125
7126 static u32
7127 bnx2_get_rx_csum(struct net_device *dev)
7128 {
7129         struct bnx2 *bp = netdev_priv(dev);
7130
7131         return bp->rx_csum;
7132 }
7133
7134 static int
7135 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7136 {
7137         struct bnx2 *bp = netdev_priv(dev);
7138
7139         bp->rx_csum = data;
7140         return 0;
7141 }
7142
7143 static int
7144 bnx2_set_tso(struct net_device *dev, u32 data)
7145 {
7146         struct bnx2 *bp = netdev_priv(dev);
7147
7148         if (data) {
7149                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7150                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7151                         dev->features |= NETIF_F_TSO6;
7152         } else
7153                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7154                                    NETIF_F_TSO_ECN);
7155         return 0;
7156 }
7157
/* Counter names reported to userspace for ETH_SS_STATS, in the same
 * order as bnx2_stats_offset_arr below (one entry per counter).
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7209
/* Number of entries in the ethtool statistics tables above/below. */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))

/* Convert a struct statistics_block member into a u32 word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7214
/* Word offset into the hardware statistics block for each counter in
 * bnx2_stats_str_arr.  The *_hi entries name the upper half of 64-bit
 * counters; bnx2_get_ethtool_stats() reads the low word at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7264
/* Byte width of each counter (8, 4, or 0) for early chip steppings.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped (width 0) because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7275
/* Byte width of each counter for later steppings; only the second
 * entry (stat_IfHCInBadOctets) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7283
/* Number of self-test results / entries in bnx2_tests_str_arr. */
#define BNX2_NUM_TESTS 6

/* Test names reported for ETH_SS_TEST; indices match the buf[] slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7296
7297 static int
7298 bnx2_get_sset_count(struct net_device *dev, int sset)
7299 {
7300         switch (sset) {
7301         case ETH_SS_TEST:
7302                 return BNX2_NUM_TESTS;
7303         case ETH_SS_STATS:
7304                 return BNX2_NUM_STATS;
7305         default:
7306                 return -EOPNOTSUPP;
7307         }
7308 }
7309
/* ethtool .self_test handler.  buf[] receives one result per entry of
 * bnx2_tests_str_arr (non-zero = failed).  Offline tests reset the
 * chip into diagnostic mode, destroying in-flight traffic; online
 * tests run against the current configuration.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the chip is powered up even if the interface is
	 * down; restored to D3hot at the end in that case.
	 */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Stop traffic and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Return the chip to its pre-test state. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7368
7369 static void
7370 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7371 {
7372         switch (stringset) {
7373         case ETH_SS_STATS:
7374                 memcpy(buf, bnx2_stats_str_arr,
7375                         sizeof(bnx2_stats_str_arr));
7376                 break;
7377         case ETH_SS_TEST:
7378                 memcpy(buf, bnx2_tests_str_arr,
7379                         sizeof(bnx2_tests_str_arr));
7380                 break;
7381         }
7382 }
7383
7384 static void
7385 bnx2_get_ethtool_stats(struct net_device *dev,
7386                 struct ethtool_stats *stats, u64 *buf)
7387 {
7388         struct bnx2 *bp = netdev_priv(dev);
7389         int i;
7390         u32 *hw_stats = (u32 *) bp->stats_blk;
7391         u8 *stats_len_arr = NULL;
7392
7393         if (hw_stats == NULL) {
7394                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7395                 return;
7396         }
7397
7398         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7399             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7400             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7401             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7402                 stats_len_arr = bnx2_5706_stats_len_arr;
7403         else
7404                 stats_len_arr = bnx2_5708_stats_len_arr;
7405
7406         for (i = 0; i < BNX2_NUM_STATS; i++) {
7407                 if (stats_len_arr[i] == 0) {
7408                         /* skip this counter */
7409                         buf[i] = 0;
7410                         continue;
7411                 }
7412                 if (stats_len_arr[i] == 4) {
7413                         /* 4-byte counter */
7414                         buf[i] = (u64)
7415                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7416                         continue;
7417                 }
7418                 /* 8-byte counter */
7419                 buf[i] = (((u64) *(hw_stats +
7420                                         bnx2_stats_offset_arr[i])) << 32) +
7421                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7422         }
7423 }
7424
7425 static int
7426 bnx2_phys_id(struct net_device *dev, u32 data)
7427 {
7428         struct bnx2 *bp = netdev_priv(dev);
7429         int i;
7430         u32 save;
7431
7432         bnx2_set_power_state(bp, PCI_D0);
7433
7434         if (data == 0)
7435                 data = 2;
7436
7437         save = REG_RD(bp, BNX2_MISC_CFG);
7438         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7439
7440         for (i = 0; i < (data * 2); i++) {
7441                 if ((i % 2) == 0) {
7442                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7443                 }
7444                 else {
7445                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7446                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7447                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7448                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7449                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7450                                 BNX2_EMAC_LED_TRAFFIC);
7451                 }
7452                 msleep_interruptible(500);
7453                 if (signal_pending(current))
7454                         break;
7455         }
7456         REG_WR(bp, BNX2_EMAC_LED, 0);
7457         REG_WR(bp, BNX2_MISC_CFG, save);
7458
7459         if (!netif_running(dev))
7460                 bnx2_set_power_state(bp, PCI_D3hot);
7461
7462         return 0;
7463 }
7464
7465 static int
7466 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7467 {
7468         struct bnx2 *bp = netdev_priv(dev);
7469
7470         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7471                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7472         else
7473                 return (ethtool_op_set_tx_csum(dev, data));
7474 }
7475
/* ethtool entry points for bnx2; handlers are defined above. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7506
/* Called with rtnl_lock */
/* MII ioctl handler: reports the PHY address (SIOCGMIIPHY) and reads
 * or writes PHY registers (SIOCGMIIREG/SIOCSMIIREG).  Register access
 * is refused when the PHY is owned by remote-PHY firmware
 * (-EOPNOTSUPP) or the interface is down (-EAGAIN); phy_lock guards
 * the MDIO transactions.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7557
7558 /* Called with rtnl_lock */
7559 static int
7560 bnx2_change_mac_addr(struct net_device *dev, void *p)
7561 {
7562         struct sockaddr *addr = p;
7563         struct bnx2 *bp = netdev_priv(dev);
7564
7565         if (!is_valid_ether_addr(addr->sa_data))
7566                 return -EINVAL;
7567
7568         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7569         if (netif_running(dev))
7570                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7571
7572         return 0;
7573 }
7574
7575 /* Called with rtnl_lock */
7576 static int
7577 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7578 {
7579         struct bnx2 *bp = netdev_priv(dev);
7580
7581         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7582                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7583                 return -EINVAL;
7584
7585         dev->mtu = new_mtu;
7586         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7587 }
7588
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run the interrupt handler for every allocated vector
 * with its IRQ line disabled so the real handler cannot race this call.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;

        for (i = 0; i < bp->irq_nvecs; i++) {
                disable_irq(bp->irq_tbl[i].vector);
                bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
                enable_irq(bp->irq_tbl[i].vector);
        }
}
#endif
7603
7604 static void __devinit
7605 bnx2_get_5709_media(struct bnx2 *bp)
7606 {
7607         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7608         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7609         u32 strap;
7610
7611         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7612                 return;
7613         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7614                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7615                 return;
7616         }
7617
7618         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7619                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7620         else
7621                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7622
7623         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7624                 switch (strap) {
7625                 case 0x4:
7626                 case 0x5:
7627                 case 0x6:
7628                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7629                         return;
7630                 }
7631         } else {
7632                 switch (strap) {
7633                 case 0x1:
7634                 case 0x2:
7635                 case 0x4:
7636                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7637                         return;
7638                 }
7639         }
7640 }
7641
/* Detect conventional PCI vs. PCI-X, the bus clock speed, and the bus
 * width from the PCICFG_MISC_STATUS register; results are cached in
 * bp->flags and bp->bus_speed_mhz (later reported by bnx2_bus_string()).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                /* PCI-X: decode the detected clock speed field. */
                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only the M66EN strap distinguishes
                 * 66MHz from 33MHz.
                 */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7693
7694 static int __devinit
7695 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7696 {
7697         struct bnx2 *bp;
7698         unsigned long mem_len;
7699         int rc, i, j;
7700         u32 reg;
7701         u64 dma_mask, persist_dma_mask;
7702
7703         SET_NETDEV_DEV(dev, &pdev->dev);
7704         bp = netdev_priv(dev);
7705
7706         bp->flags = 0;
7707         bp->phy_flags = 0;
7708
7709         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7710         rc = pci_enable_device(pdev);
7711         if (rc) {
7712                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7713                 goto err_out;
7714         }
7715
7716         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7717                 dev_err(&pdev->dev,
7718                         "Cannot find PCI device base address, aborting.\n");
7719                 rc = -ENODEV;
7720                 goto err_out_disable;
7721         }
7722
7723         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7724         if (rc) {
7725                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7726                 goto err_out_disable;
7727         }
7728
7729         pci_set_master(pdev);
7730         pci_save_state(pdev);
7731
7732         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7733         if (bp->pm_cap == 0) {
7734                 dev_err(&pdev->dev,
7735                         "Cannot find power management capability, aborting.\n");
7736                 rc = -EIO;
7737                 goto err_out_release;
7738         }
7739
7740         bp->dev = dev;
7741         bp->pdev = pdev;
7742
7743         spin_lock_init(&bp->phy_lock);
7744         spin_lock_init(&bp->indirect_lock);
7745 #ifdef BCM_CNIC
7746         mutex_init(&bp->cnic_lock);
7747 #endif
7748         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7749
7750         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7751         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7752         dev->mem_end = dev->mem_start + mem_len;
7753         dev->irq = pdev->irq;
7754
7755         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7756
7757         if (!bp->regview) {
7758                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7759                 rc = -ENOMEM;
7760                 goto err_out_release;
7761         }
7762
7763         /* Configure byte swap and enable write to the reg_window registers.
7764          * Rely on CPU to do target byte swapping on big endian systems
7765          * The chip's target access swapping will not swap all accesses
7766          */
7767         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7768                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7769                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7770
7771         bnx2_set_power_state(bp, PCI_D0);
7772
7773         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7774
7775         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7776                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7777                         dev_err(&pdev->dev,
7778                                 "Cannot find PCIE capability, aborting.\n");
7779                         rc = -EIO;
7780                         goto err_out_unmap;
7781                 }
7782                 bp->flags |= BNX2_FLAG_PCIE;
7783                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7784                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7785         } else {
7786                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7787                 if (bp->pcix_cap == 0) {
7788                         dev_err(&pdev->dev,
7789                                 "Cannot find PCIX capability, aborting.\n");
7790                         rc = -EIO;
7791                         goto err_out_unmap;
7792                 }
7793                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7794         }
7795
7796         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7797                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7798                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7799         }
7800
7801         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7802                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7803                         bp->flags |= BNX2_FLAG_MSI_CAP;
7804         }
7805
7806         /* 5708 cannot support DMA addresses > 40-bit.  */
7807         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7808                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7809         else
7810                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7811
7812         /* Configure DMA attributes. */
7813         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7814                 dev->features |= NETIF_F_HIGHDMA;
7815                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7816                 if (rc) {
7817                         dev_err(&pdev->dev,
7818                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7819                         goto err_out_unmap;
7820                 }
7821         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7822                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7823                 goto err_out_unmap;
7824         }
7825
7826         if (!(bp->flags & BNX2_FLAG_PCIE))
7827                 bnx2_get_pci_speed(bp);
7828
7829         /* 5706A0 may falsely detect SERR and PERR. */
7830         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7831                 reg = REG_RD(bp, PCI_COMMAND);
7832                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7833                 REG_WR(bp, PCI_COMMAND, reg);
7834         }
7835         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7836                 !(bp->flags & BNX2_FLAG_PCIX)) {
7837
7838                 dev_err(&pdev->dev,
7839                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7840                 goto err_out_unmap;
7841         }
7842
7843         bnx2_init_nvram(bp);
7844
7845         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7846
7847         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7848             BNX2_SHM_HDR_SIGNATURE_SIG) {
7849                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7850
7851                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7852         } else
7853                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7854
7855         /* Get the permanent MAC address.  First we need to make sure the
7856          * firmware is actually running.
7857          */
7858         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7859
7860         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7861             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7862                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7863                 rc = -ENODEV;
7864                 goto err_out_unmap;
7865         }
7866
7867         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7868         for (i = 0, j = 0; i < 3; i++) {
7869                 u8 num, k, skip0;
7870
7871                 num = (u8) (reg >> (24 - (i * 8)));
7872                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7873                         if (num >= k || !skip0 || k == 1) {
7874                                 bp->fw_version[j++] = (num / k) + '0';
7875                                 skip0 = 0;
7876                         }
7877                 }
7878                 if (i != 2)
7879                         bp->fw_version[j++] = '.';
7880         }
7881         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7882         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7883                 bp->wol = 1;
7884
7885         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7886                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7887
7888                 for (i = 0; i < 30; i++) {
7889                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7890                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7891                                 break;
7892                         msleep(10);
7893                 }
7894         }
7895         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7896         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7897         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7898             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7899                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7900
7901                 bp->fw_version[j++] = ' ';
7902                 for (i = 0; i < 3; i++) {
7903                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7904                         reg = swab32(reg);
7905                         memcpy(&bp->fw_version[j], &reg, 4);
7906                         j += 4;
7907                 }
7908         }
7909
7910         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7911         bp->mac_addr[0] = (u8) (reg >> 8);
7912         bp->mac_addr[1] = (u8) reg;
7913
7914         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7915         bp->mac_addr[2] = (u8) (reg >> 24);
7916         bp->mac_addr[3] = (u8) (reg >> 16);
7917         bp->mac_addr[4] = (u8) (reg >> 8);
7918         bp->mac_addr[5] = (u8) reg;
7919
7920         bp->tx_ring_size = MAX_TX_DESC_CNT;
7921         bnx2_set_rx_ring_size(bp, 255);
7922
7923         bp->rx_csum = 1;
7924
7925         bp->tx_quick_cons_trip_int = 2;
7926         bp->tx_quick_cons_trip = 20;
7927         bp->tx_ticks_int = 18;
7928         bp->tx_ticks = 80;
7929
7930         bp->rx_quick_cons_trip_int = 2;
7931         bp->rx_quick_cons_trip = 12;
7932         bp->rx_ticks_int = 18;
7933         bp->rx_ticks = 18;
7934
7935         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7936
7937         bp->current_interval = BNX2_TIMER_INTERVAL;
7938
7939         bp->phy_addr = 1;
7940
7941         /* Disable WOL support if we are running on a SERDES chip. */
7942         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7943                 bnx2_get_5709_media(bp);
7944         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7945                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7946
7947         bp->phy_port = PORT_TP;
7948         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7949                 bp->phy_port = PORT_FIBRE;
7950                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7951                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7952                         bp->flags |= BNX2_FLAG_NO_WOL;
7953                         bp->wol = 0;
7954                 }
7955                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7956                         /* Don't do parallel detect on this board because of
7957                          * some board problems.  The link will not go down
7958                          * if we do parallel detect.
7959                          */
7960                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7961                             pdev->subsystem_device == 0x310c)
7962                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7963                 } else {
7964                         bp->phy_addr = 2;
7965                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7966                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7967                 }
7968         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7969                    CHIP_NUM(bp) == CHIP_NUM_5708)
7970                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7971         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7972                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7973                   CHIP_REV(bp) == CHIP_REV_Bx))
7974                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7975
7976         bnx2_init_fw_cap(bp);
7977
7978         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7979             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7980             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7981             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7982                 bp->flags |= BNX2_FLAG_NO_WOL;
7983                 bp->wol = 0;
7984         }
7985
7986         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7987                 bp->tx_quick_cons_trip_int =
7988                         bp->tx_quick_cons_trip;
7989                 bp->tx_ticks_int = bp->tx_ticks;
7990                 bp->rx_quick_cons_trip_int =
7991                         bp->rx_quick_cons_trip;
7992                 bp->rx_ticks_int = bp->rx_ticks;
7993                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7994                 bp->com_ticks_int = bp->com_ticks;
7995                 bp->cmd_ticks_int = bp->cmd_ticks;
7996         }
7997
7998         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7999          *
8000          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8001          * with byte enables disabled on the unused 32-bit word.  This is legal
8002          * but causes problems on the AMD 8132 which will eventually stop
8003          * responding after a while.
8004          *
8005          * AMD believes this incompatibility is unique to the 5706, and
8006          * prefers to locally disable MSI rather than globally disabling it.
8007          */
8008         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8009                 struct pci_dev *amd_8132 = NULL;
8010
8011                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8012                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8013                                                   amd_8132))) {
8014
8015                         if (amd_8132->revision >= 0x10 &&
8016                             amd_8132->revision <= 0x13) {
8017                                 disable_msi = 1;
8018                                 pci_dev_put(amd_8132);
8019                                 break;
8020                         }
8021                 }
8022         }
8023
8024         bnx2_set_default_link(bp);
8025         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8026
8027         init_timer(&bp->timer);
8028         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8029         bp->timer.data = (unsigned long) bp;
8030         bp->timer.function = bnx2_timer;
8031
8032         return 0;
8033
8034 err_out_unmap:
8035         if (bp->regview) {
8036                 iounmap(bp->regview);
8037                 bp->regview = NULL;
8038         }
8039
8040 err_out_release:
8041         pci_release_regions(pdev);
8042
8043 err_out_disable:
8044         pci_disable_device(pdev);
8045         pci_set_drvdata(pdev, NULL);
8046
8047 err_out:
8048         return rc;
8049 }
8050
8051 static char * __devinit
8052 bnx2_bus_string(struct bnx2 *bp, char *str)
8053 {
8054         char *s = str;
8055
8056         if (bp->flags & BNX2_FLAG_PCIE) {
8057                 s += sprintf(s, "PCI Express");
8058         } else {
8059                 s += sprintf(s, "PCI");
8060                 if (bp->flags & BNX2_FLAG_PCIX)
8061                         s += sprintf(s, "-X");
8062                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8063                         s += sprintf(s, " 32-bit");
8064                 else
8065                         s += sprintf(s, " 64-bit");
8066                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8067         }
8068         return str;
8069 }
8070
8071 static void __devinit
8072 bnx2_init_napi(struct bnx2 *bp)
8073 {
8074         int i;
8075
8076         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8077                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8078                 int (*poll)(struct napi_struct *, int);
8079
8080                 if (i == 0)
8081                         poll = bnx2_poll;
8082                 else
8083                         poll = bnx2_poll_msix;
8084
8085                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8086                 bnapi->bp = bp;
8087         }
8088 }
8089
/* net_device operations table; installed on dev->netdev_ops in
 * bnx2_init_one().
 */
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats          = bnx2_get_stats,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        .ndo_poll_controller    = poll_bnx2,
#endif
};
8108
/* Mirror the given feature flags onto dev->vlan_features so the same
 * offloads stay available on VLAN devices stacked on this one.
 * Compiles to a no-op when VLAN support is not configured.
 *
 * Note: specifier order fixed from "static void inline" to the
 * conventional "static inline void".
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
        dev->vlan_features |= flags;
#endif
}
8115
/* PCI probe hook: allocate the net_device, run one-time board setup,
 * load the firmware, advertise offload features, and register with the
 * networking core.  Returns 0 or a negative errno; on failure after
 * bnx2_init_board() succeeded, the error label unwinds everything that
 * bnx2_init_board() and the firmware load acquired.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc;
        char str[40];

        /* Print the driver banner only once, on the first probe. */
        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

        if (!dev)
                return -ENOMEM;

        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                /* bnx2_init_board() released its own resources already. */
                free_netdev(dev);
                return rc;
        }

        dev->netdev_ops = &bnx2_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);
        bnx2_init_napi(bp);

        pci_set_drvdata(pdev, dev);

        rc = bnx2_request_firmware(bp);
        if (rc)
                goto error;

        /* MAC address was read from shared memory by bnx2_init_board(). */
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);

        /* Checksum/SG/TSO offloads; the 5709 also handles IPv6. */
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_IPV6_CSUM;
                vlan_features_add(dev, NETIF_F_IPV6_CSUM);
        }
#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
        vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_TSO6;
                vlan_features_add(dev, NETIF_F_TSO6);
        }
        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
        }

        printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
                "IRQ %d, node addr %pM\n",
                dev->name,
                board_info[ent->driver_data].name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str),
                dev->base_addr,
                bp->pdev->irq, dev->dev_addr);

        return 0;

error:
        /* Unwind firmware and board resources in reverse order. */
        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return rc;
}
8202
/* PCI remove hook: undo everything bnx2_init_one() set up.  The work
 * queue is flushed first so a pending reset_task cannot run against a
 * device that is being torn down.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        flush_scheduled_work();

        unregister_netdev(dev);

        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);

        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
8226
/* PM suspend hook: quiesce the NIC, free posted skbs, and drop the
 * chip into the requested low-power state.  Always returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Drain any queued reset_task before tearing the device down. */
        flush_scheduled_work();
        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
8250
/* PM resume hook: restore PCI config state and, if the interface was
 * up at suspend time, re-power and re-initialize the chip.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        /* NOTE(review): the return value of bnx2_init_nic() is ignored,
         * so a failed re-init is still reported as a successful resume —
         * confirm whether that is intentional.
         */
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp);
        return 0;
}
8267
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* rtnl_lock serializes this against ndo operations and the
         * tx timeout reset path.
         */
        rtnl_lock();
        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev)) {
                bnx2_netif_stop(bp);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }

        pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
8302
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
        pci_restore_state(pdev);
        /* Re-snapshot config space so a later suspend/recovery restores
         * the freshly re-programmed values.
         */
        pci_save_state(pdev);

        if (netif_running(dev)) {
                bnx2_set_power_state(bp, PCI_D0);
                /* NOTE(review): bnx2_init_nic()'s return value is
                 * ignored; a failed re-init still reports RECOVERED —
                 * confirm whether that is intentional.
                 */
                bnx2_init_nic(bp, 1);
        }

        rtnl_unlock();
        return PCI_ERS_RESULT_RECOVERED;
}
8333
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Take rtnl_lock to serialize against other device resets. */
        rtnl_lock();
        if (netif_running(dev))
                bnx2_netif_start(bp);

        netif_device_attach(dev);
        rtnl_unlock();
}
8353
/* PCI error-recovery (AER) callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
8359
/* PCI driver glue; registered from bnx2_init(). */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
8369
/* Module entry point: register the PCI driver; per-device setup runs
 * via bnx2_init_one() for each matching device.
 */
static int __init bnx2_init(void)
{
        return pci_register_driver(&bnx2_pci_driver);
}
8374
/* Module exit point: unregister the PCI driver; bnx2_remove_one() is
 * called for each bound device as a side effect.
 */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
8379
/* Hook the init/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8382
8383
8384