e1000: cleanup unused parameters
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.0.9"
62 #define DRV_MODULE_RELDATE      "April 27, 2010"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j9.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
/* One-line banner printed when the module loads. */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to avoid MSI and fall back to
 * legacy INTx interrupts.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Supported board variants.  The values double as indices into the
 * board_info[] name table below and appear in the driver_data field
 * of bnx2_pci_tbl.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
105
106 /* indexed by board_t, above */
/* Human-readable board names, indexed by board_t, above.
 * Entry order must match the board_t enumerators exactly.
 */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
122
/* PCI ID table.  Entries with specific HP subsystem IDs are listed
 * before the PCI_ANY_ID catch-alls for the same device ID, so the
 * HP-branded boards match first.  The last field is the board_t index.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 0x163b/0x163c: BCM5716 copper/fiber — no PCI_DEVICE_ID_NX2_*
         * constant is used for these here.
         */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
148
/* Table of NVRAM devices the driver knows how to access.  Each entry
 * describes one flash/EEPROM part: the first five words are raw
 * strap/configuration register values, followed by access flags,
 * page geometry, address mask, total size and a descriptive name
 * (field layout per struct flash_spec).  "Expansion" entries are
 * placeholders for strap encodings with no known part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
237
/* The 5709 family has a single known NVRAM layout, so it is described
 * by this one fixed spec instead of being probed via flash_table.
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declaration: defined later in this file. */
static void bnx2_init_napi(struct bnx2 *bp);
250
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253         u32 diff;
254
255         smp_mb();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return (bp->tx_ring_size - diff);
267 }
268
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272         u32 val;
273
274         spin_lock_bh(&bp->indirect_lock);
275         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
276         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277         spin_unlock_bh(&bp->indirect_lock);
278         return val;
279 }
280
/* Write @val to a device register indirectly through the PCI config
 * window.  The address-select write must immediately precede the data
 * write, hence both happen under indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
289
/* Write a dword into the firmware shared memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
295
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298 {
299         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300 }
301
/* Write @val into context memory at @cid_addr + @offset.
 *
 * On 5709-class chips the write goes through the CTX_CTRL request
 * interface and we poll (up to 5 x 5us) for the WRITE_REQ bit to
 * clear; older chips use the direct CTX_DATA_ADR/CTX_DATA pair.
 * indirect_lock serializes use of these shared registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Poll for completion; a stuck WRITE_REQ after the loop
                 * is silently ignored.
                 */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
325
326 #ifdef BCM_CNIC
/* Control hook exported to the CNIC driver: lets CNIC perform
 * indirect register and context-memory accesses through us.
 * Returns 0 on success or -EINVAL for an unknown command.
 */
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct drv_ctl_io *io = &info->data.io;

        switch (info->cmd) {
        case DRV_CTL_IO_WR_CMD:
                bnx2_reg_wr_ind(bp, io->offset, io->data);
                break;
        case DRV_CTL_IO_RD_CMD:
                io->data = bnx2_reg_rd_ind(bp, io->offset);
                break;
        case DRV_CTL_CTX_WR_CMD:
                bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
348
/* Describe our interrupt setup to the CNIC driver.
 *
 * With MSI-X, CNIC gets its own vector (the one after our irq_nvecs)
 * and its own status block; otherwise it shares vector 0 and polls
 * via cnic_tag/cnic_present on our first status block.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        int sb_id;

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_present = 0;
                sb_id = bp->irq_nvecs;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_tag = bnapi->last_status_idx;
                bnapi->cnic_present = 1;
                sb_id = 0;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }

        /* Point CNIC at the status block for its vector; the blocks
         * are laid out contiguously at BNX2_SBLK_MSIX_ALIGN_SIZE
         * intervals (see bnx2_alloc_mem()).
         */
        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        cp->irq_arr[0].status_blk = (void *)
                ((unsigned long) bnapi->status_blk.msi +
                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
        cp->irq_arr[0].status_blk_num = sb_id;
        cp->num_irq = 1;
}
375
/* Called by the CNIC driver to attach itself to this device.
 * Returns -EINVAL for a NULL ops, -EBUSY if already registered.
 *
 * NOTE(review): unlike bnx2_unregister_cnic(), the updates to
 * cnic_data/cnic_ops/drv_state here are not done under cnic_lock —
 * presumably registration is serialized by the caller; confirm.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        bp->cnic_data = data;
        /* Publish ops last so RCU readers see fully-initialized state. */
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}
398
/* Detach the CNIC driver.  After clearing cnic_ops under cnic_lock,
 * synchronize_rcu() waits for any in-flight RCU readers of cnic_ops
 * to finish before the caller may free CNIC state.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
413
/* Entry point used by the CNIC driver to discover this device.
 * Fills in the cnic_eth_dev descriptor with our hardware handles and
 * the register/unregister/ctl callbacks, and returns it.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = bp->chip_id;
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->drv_ctl = bnx2_drv_ctl;
        cp->drv_register_cnic = bnx2_register_cnic;
        cp->drv_unregister_cnic = bnx2_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
430
/* Tell an attached CNIC driver (if any) to stop.  cnic_lock keeps
 * cnic_ops stable for the duration of the callback.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = bp->cnic_ops;
        if (c_ops) {
                info.cmd = CNIC_CTL_STOP_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
445
/* Tell an attached CNIC driver (if any) to start.  When sharing our
 * vector (no MSI-X), resync cnic_tag with the latest status index
 * first so CNIC does not process stale events.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = bp->cnic_ops;
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
465
466 #else
467
/* CNIC support not compiled in: stop is a no-op. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
472
/* CNIC support not compiled in: start is a no-op. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
477
478 #endif
479
/* Read PHY register @reg via the EMAC MDIO interface.
 *
 * If hardware auto-polling of the PHY is enabled, it is suspended
 * around the manual MDIO transaction (and restored afterwards) so the
 * two do not collide on the MDIO bus.
 *
 * Returns 0 with *val filled in on success, or -EBUSY with *val = 0
 * if the transaction does not complete within the 50 x 10us poll.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Launch the read: PHY address, register, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for START_BUSY to clear, then latch the data field. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
536
/* Write @val to PHY register @reg via the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual transaction and restored afterwards.  Returns 0 on success
 * or -EBUSY if the write does not complete within the 50 x 10us poll.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Launch the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
585
586 static void
587 bnx2_disable_int(struct bnx2 *bp)
588 {
589         int i;
590         struct bnx2_napi *bnapi;
591
592         for (i = 0; i < bp->irq_nvecs; i++) {
593                 bnapi = &bp->bnx2_napi[i];
594                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596         }
597         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598 }
599
/* Unmask interrupts on every vector.
 *
 * Each vector gets two writes: the first acknowledges up to
 * last_status_idx while still masked, the second repeats the ack with
 * the mask bit cleared.  Finally COAL_NOW asks the host coalescing
 * block to generate an immediate update so nothing is left pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
620
/* Disable interrupts and wait for any running handlers to finish.
 *
 * intr_sem is bumped first so a concurrent bnx2_netif_start() will
 * not re-enable interrupts underneath us; if the device is not
 * running there is nothing further to quiesce.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
634
635 static void
636 bnx2_napi_disable(struct bnx2 *bp)
637 {
638         int i;
639
640         for (i = 0; i < bp->irq_nvecs; i++)
641                 napi_disable(&bp->bnx2_napi[i].napi);
642 }
643
644 static void
645 bnx2_napi_enable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_enable(&bp->bnx2_napi[i].napi);
651 }
652
/* Quiesce the data path: optionally stop CNIC, then NAPI, the TX
 * queues, and finally interrupts (synchronously).
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                int i;

                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                /* prevent tx timeout: refresh each queue's trans_start
                 * so the watchdog does not fire while TX is disabled.
                 */
                for (i = 0; i <  bp->dev->num_tx_queues; i++) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(bp->dev, i);
                        txq->trans_start = jiffies;
                }
        }
        bnx2_disable_int_sync(bp);
}
673
674 static void
675 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
676 {
677         if (atomic_dec_and_test(&bp->intr_sem)) {
678                 if (netif_running(bp->dev)) {
679                         netif_tx_wake_all_queues(bp->dev);
680                         bnx2_napi_enable(bp);
681                         bnx2_enable_int(bp);
682                         if (start_cnic)
683                                 bnx2_cnic_start(bp);
684                 }
685         }
686 }
687
/* Release per-ring TX memory: the DMA descriptor ring and the
 * software buffer-tracking array.  Safe to call on partially
 * allocated state; pointers are NULLed after freeing.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                if (txr->tx_desc_ring) {
                        pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
                                            txr->tx_desc_ring,
                                            txr->tx_desc_mapping);
                        txr->tx_desc_ring = NULL;
                }
                kfree(txr->tx_buf_ring);
                txr->tx_buf_ring = NULL;
        }
}
707
/* Release per-ring RX memory: the DMA descriptor ring pages for both
 * the normal and page rings, plus the vmalloc'ed software tracking
 * arrays.  Safe on partially allocated state.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                for (j = 0; j < bp->rx_max_ring; j++) {
                        if (rxr->rx_desc_ring[j])
                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                                    rxr->rx_desc_ring[j],
                                                    rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_buf_ring);
                rxr->rx_buf_ring = NULL;

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                                    rxr->rx_pg_desc_ring[j],
                                                    rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_pg_ring);
                rxr->rx_pg_ring = NULL;
        }
}
739
/* Allocate per-ring TX memory (software buffer array + DMA
 * descriptor ring).  Returns 0 or -ENOMEM; on failure, partial
 * allocations are left for the caller to release (bnx2_alloc_mem()
 * calls bnx2_free_mem() on error).
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
                if (txr->tx_buf_ring == NULL)
                        return -ENOMEM;

                txr->tx_desc_ring =
                        pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                             &txr->tx_desc_mapping);
                if (txr->tx_desc_ring == NULL)
                        return -ENOMEM;
        }
        return 0;
}
761
/* Allocate per-ring RX memory: vmalloc'ed software tracking arrays
 * and DMA descriptor ring pages for the normal and (if enabled) page
 * rings.  Returns 0 or -ENOMEM; partial allocations are released by
 * the caller via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                /* The page ring is only used for jumbo-sized buffers. */
                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
810
/* Release everything bnx2_alloc_mem() allocated: TX/RX ring memory,
 * the 5709 context-block pages, and the combined status+statistics
 * block.  Safe on partially allocated state, so it doubles as the
 * error-unwind path for bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* The stats block shares the status-block allocation, so both
         * pointers are cleared together.
         */
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
836
/* Allocate all device memory: the combined status+statistics DMA
 * block (one per-vector status block slice when MSI-X capable), the
 * 5709 context-block pages, and the RX/TX ring memory.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block... */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        /* ...and each additional MSI-X vector gets its own aligned
         * slice within the same allocation.
         */
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* The statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 needs host pages for its context memory (8KB total). */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
913
/* Mirror the driver's view of the link (speed, duplex, autoneg
 * state) into shared memory so the bootcode/management firmware sees
 * the same link status as the driver.  No-op when a remote
 * (firmware-controlled) PHY owns link management.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed/duplex into the firmware status word. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR bits are latched; read twice to get the
			 * current (not historical) state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			/* Link without completed autoneg means the speed
			 * was established by parallel detection.
			 */
			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
972
973 static char *
974 bnx2_xceiver_str(struct bnx2 *bp)
975 {
976         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
977                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
978                  "Copper"));
979 }
980
981 static void
982 bnx2_report_link(struct bnx2 *bp)
983 {
984         if (bp->link_up) {
985                 netif_carrier_on(bp->dev);
986                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
987                             bnx2_xceiver_str(bp),
988                             bp->line_speed,
989                             bp->duplex == DUPLEX_FULL ? "full" : "half");
990
991                 if (bp->flow_ctrl) {
992                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
993                                 pr_cont(", receive ");
994                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
995                                         pr_cont("& transmit ");
996                         }
997                         else {
998                                 pr_cont(", transmit ");
999                         }
1000                         pr_cont("flow control ON");
1001                 }
1002                 pr_cont("\n");
1003         } else {
1004                 netif_carrier_off(bp->dev);
1005                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1006                            bnx2_xceiver_str(bp));
1007         }
1008
1009         bnx2_report_fw_link(bp);
1010 }
1011
/* Resolve the effective flow control (pause) settings after link-up,
 * following the IEEE 802.3 autoneg pause resolution rules.  The
 * result is written to bp->flow_ctrl as FLOW_CTRL_TX/FLOW_CTRL_RX.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If pause autoneg is not fully enabled, apply the requested
	 * settings directly (full duplex links only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only defined for full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state
	 * in a status register, so no advertisement math is needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map 1000BASE-X pause advertisement bits onto the standard
	 * pause bits so the resolution logic below covers both cases.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				/* Both sides symmetric-capable. */
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Partner only sends pause; we receive. */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		/* We only send pause; partner must accept and be
		 * asymmetric-capable.
		 */
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1087
1088 static int
1089 bnx2_5709s_linkup(struct bnx2 *bp)
1090 {
1091         u32 val, speed;
1092
1093         bp->link_up = 1;
1094
1095         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1096         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1097         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098
1099         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1100                 bp->line_speed = bp->req_line_speed;
1101                 bp->duplex = bp->req_duplex;
1102                 return 0;
1103         }
1104         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1105         switch (speed) {
1106                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1107                         bp->line_speed = SPEED_10;
1108                         break;
1109                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1110                         bp->line_speed = SPEED_100;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1113                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1114                         bp->line_speed = SPEED_1000;
1115                         break;
1116                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1117                         bp->line_speed = SPEED_2500;
1118                         break;
1119         }
1120         if (val & MII_BNX2_GP_TOP_AN_FD)
1121                 bp->duplex = DUPLEX_FULL;
1122         else
1123                 bp->duplex = DUPLEX_HALF;
1124         return 0;
1125 }
1126
1127 static int
1128 bnx2_5708s_linkup(struct bnx2 *bp)
1129 {
1130         u32 val;
1131
1132         bp->link_up = 1;
1133         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1134         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1135                 case BCM5708S_1000X_STAT1_SPEED_10:
1136                         bp->line_speed = SPEED_10;
1137                         break;
1138                 case BCM5708S_1000X_STAT1_SPEED_100:
1139                         bp->line_speed = SPEED_100;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_1G:
1142                         bp->line_speed = SPEED_1000;
1143                         break;
1144                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1145                         bp->line_speed = SPEED_2500;
1146                         break;
1147         }
1148         if (val & BCM5708S_1000X_STAT1_FD)
1149                 bp->duplex = DUPLEX_FULL;
1150         else
1151                 bp->duplex = DUPLEX_HALF;
1152
1153         return 0;
1154 }
1155
1156 static int
1157 bnx2_5706s_linkup(struct bnx2 *bp)
1158 {
1159         u32 bmcr, local_adv, remote_adv, common;
1160
1161         bp->link_up = 1;
1162         bp->line_speed = SPEED_1000;
1163
1164         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1165         if (bmcr & BMCR_FULLDPLX) {
1166                 bp->duplex = DUPLEX_FULL;
1167         }
1168         else {
1169                 bp->duplex = DUPLEX_HALF;
1170         }
1171
1172         if (!(bmcr & BMCR_ANENABLE)) {
1173                 return 0;
1174         }
1175
1176         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1177         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1178
1179         common = local_adv & remote_adv;
1180         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1181
1182                 if (common & ADVERTISE_1000XFULL) {
1183                         bp->duplex = DUPLEX_FULL;
1184                 }
1185                 else {
1186                         bp->duplex = DUPLEX_HALF;
1187                 }
1188         }
1189
1190         return 0;
1191 }
1192
/* Fill in bp->line_speed and bp->duplex for a copper PHY that has
 * link.  With autoneg enabled the result is the highest common
 * denominator of local and partner advertisements (1000T first, then
 * 10/100); otherwise it is taken from the forced BMCR bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000BASE-T: local advert vs. partner status reg. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* LPA_1000FULL/HALF sit two bits above the
		 * ADVERTISE_1000FULL/HALF positions; shift the partner
		 * word down before intersecting.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the 10/100
			 * advertisement registers, best match first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: speed/duplex come straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1258
/* Initialize the RX context for one channel (cid).  On the 5709 this
 * also programs the buffer low/high water marks that control pause
 * frame generation when TX flow control is enabled.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): magic 0x02 << 8 — presumably a context-type
	 * subfield; value taken from vendor initialization.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Water marks only matter when we can send pause. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		/* High mark: 1/4 of the ring, capped at lo_water + 16. */
		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale down to the granularity the context expects. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to its 4-bit field; a zero high
		 * mark disables the low mark as well.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1294
1295 static void
1296 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1297 {
1298         int i;
1299         u32 cid;
1300
1301         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1302                 if (i == 1)
1303                         cid = RX_RSS_CID;
1304                 bnx2_init_rx_context(bp, cid);
1305         }
1306 }
1307
/* Program the EMAC to match the current link parameters (speed,
 * duplex, flow control) previously resolved by the link logic.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Vendor TX length register values; 1G half duplex needs the
	 * larger setting (0x26ff), all other modes use 0x2620.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode and
				 * falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX context water marks depend on the flow control
	 * state, so reprogram all RX contexts.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1375
1376 static void
1377 bnx2_enable_bmsr1(struct bnx2 *bp)
1378 {
1379         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1380             (CHIP_NUM(bp) == CHIP_NUM_5709))
1381                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1382                                MII_BNX2_BLK_ADDR_GP_STATUS);
1383 }
1384
1385 static void
1386 bnx2_disable_bmsr1(struct bnx2 *bp)
1387 {
1388         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1389             (CHIP_NUM(bp) == CHIP_NUM_5709))
1390                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1391                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1392 }
1393
1394 static int
1395 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1396 {
1397         u32 up1;
1398         int ret = 1;
1399
1400         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1401                 return 0;
1402
1403         if (bp->autoneg & AUTONEG_SPEED)
1404                 bp->advertising |= ADVERTISED_2500baseX_Full;
1405
1406         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1407                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1408
1409         bnx2_read_phy(bp, bp->mii_up1, &up1);
1410         if (!(up1 & BCM5708S_UP1_2G5)) {
1411                 up1 |= BCM5708S_UP1_2G5;
1412                 bnx2_write_phy(bp, bp->mii_up1, up1);
1413                 ret = 0;
1414         }
1415
1416         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1417                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1418                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1419
1420         return ret;
1421 }
1422
1423 static int
1424 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1425 {
1426         u32 up1;
1427         int ret = 0;
1428
1429         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1430                 return 0;
1431
1432         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1433                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1434
1435         bnx2_read_phy(bp, bp->mii_up1, &up1);
1436         if (up1 & BCM5708S_UP1_2G5) {
1437                 up1 &= ~BCM5708S_UP1_2G5;
1438                 bnx2_write_phy(bp, bp->mii_up1, up1);
1439                 ret = 1;
1440         }
1441
1442         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1443                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1444                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1445
1446         return ret;
1447 }
1448
/* Force the SerDes PHY to 2.5 Gbps, disabling autoneg.  The
 * mechanism is chip-specific: the 5709 uses the SERDES_DIG MISC1
 * force bits, the 5708 a vendor BMCR bit; other chips are no-ops.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Set the force-speed field in the SERDES_DIG bank. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		/* Return to the default bank before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips cannot force 2.5G. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Forcing a speed implies turning autoneg off. */
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1485
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bits and, if autoneg is configured, restart autonegotiation at
 * 1000 Mbps.  No-op on chips without forced 2.5G support.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG bank. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		/* Return to the default bank before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Kick off a fresh autonegotiation if configured. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1518
1519 static void
1520 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1521 {
1522         u32 val;
1523
1524         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1525         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1526         if (start)
1527                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1528         else
1529                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1530 }
1531
/* Re-evaluate the PHY link state and update driver state, log the
 * result on change, and reprogram the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote PHY: the firmware manages the link, not the driver. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events; read twice for the current
	 * state.  On 5709 SerDes this selects the alternate BMSR bank
	 * around the reads.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			/* Undo a previously forced link-down. */
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Read the AN debug shadow twice to get the current
		 * (not latched) NOSYNC indication.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override BMSR link status with the
		 * EMAC link indication plus the sync check.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G state so autoneg can
		 * run on the next link attempt.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			/* Re-enable autoneg after parallel detection. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1615
1616 static int
1617 bnx2_reset_phy(struct bnx2 *bp)
1618 {
1619         int i;
1620         u32 reg;
1621
1622         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1623
1624 #define PHY_RESET_MAX_WAIT 100
1625         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1626                 udelay(10);
1627
1628                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1629                 if (!(reg & BMCR_RESET)) {
1630                         udelay(20);
1631                         break;
1632                 }
1633         }
1634         if (i == PHY_RESET_MAX_WAIT) {
1635                 return -EBUSY;
1636         }
1637         return 0;
1638 }
1639
1640 static u32
1641 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1642 {
1643         u32 adv = 0;
1644
1645         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1646                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1647
1648                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1649                         adv = ADVERTISE_1000XPAUSE;
1650                 }
1651                 else {
1652                         adv = ADVERTISE_PAUSE_CAP;
1653                 }
1654         }
1655         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1656                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1657                         adv = ADVERTISE_1000XPSE_ASYM;
1658                 }
1659                 else {
1660                         adv = ADVERTISE_PAUSE_ASYM;
1661                 }
1662         }
1663         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1664                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1665                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1666                 }
1667                 else {
1668                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1669                 }
1670         }
1671         return adv;
1672 }
1673
1674 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1675
/* Hand link configuration to the remote-PHY firmware: encode the
 * requested speed/duplex/pause settings into a single argument word,
 * write it to shared memory, and issue the SET_LINK firmware
 * command.  Drops and re-acquires bp->phy_lock around the firmware
 * sync (see the sparse annotations).  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Fold the pause advertisement into the same word. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep/poll; cannot hold phy_lock here. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1734
/* Configure the SerDes (fiber) PHY for the requested link settings.
 * Caller holds phy_lock; it is dropped briefly when forcing a link
 * transition (msleep) or delegating to the remote-PHY firmware path.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHY: hand the request to the bootcode. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 appears to be the second speed-
				 * select bit used for 2.5G on the 5709
				 * serdes — TODO confirm against the 5709
				 * register spec.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Advertise nothing and restart autoneg so
				 * the partner sees the link drop, then
				 * reprogram the forced settings below.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed in BMCR; just re-resolve flow
			 * control and refresh the MAC link settings.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop the BH spinlock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1851
1852 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1853         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1854                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1855                 (ADVERTISED_1000baseT_Full)
1856
1857 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1858         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1859         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1860         ADVERTISED_1000baseT_Full)
1861
1862 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1863         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1864
1865 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1866
1867 static void
1868 bnx2_set_default_remote_link(struct bnx2 *bp)
1869 {
1870         u32 link;
1871
1872         if (bp->phy_port == PORT_TP)
1873                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1874         else
1875                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1876
1877         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1878                 bp->req_line_speed = 0;
1879                 bp->autoneg |= AUTONEG_SPEED;
1880                 bp->advertising = ADVERTISED_Autoneg;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1882                         bp->advertising |= ADVERTISED_10baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1884                         bp->advertising |= ADVERTISED_10baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1886                         bp->advertising |= ADVERTISED_100baseT_Half;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1888                         bp->advertising |= ADVERTISED_100baseT_Full;
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1890                         bp->advertising |= ADVERTISED_1000baseT_Full;
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1892                         bp->advertising |= ADVERTISED_2500baseX_Full;
1893         } else {
1894                 bp->autoneg = 0;
1895                 bp->advertising = 0;
1896                 bp->req_duplex = DUPLEX_FULL;
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1898                         bp->req_line_speed = SPEED_10;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1903                         bp->req_line_speed = SPEED_100;
1904                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905                                 bp->req_duplex = DUPLEX_HALF;
1906                 }
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1908                         bp->req_line_speed = SPEED_1000;
1909                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1910                         bp->req_line_speed = SPEED_2500;
1911         }
1912 }
1913
1914 static void
1915 bnx2_set_default_link(struct bnx2 *bp)
1916 {
1917         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1918                 bnx2_set_default_remote_link(bp);
1919                 return;
1920         }
1921
1922         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1923         bp->req_line_speed = 0;
1924         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1925                 u32 reg;
1926
1927                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1928
1929                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1930                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1931                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1932                         bp->autoneg = 0;
1933                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1934                         bp->req_duplex = DUPLEX_FULL;
1935                 }
1936         } else
1937                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1938 }
1939
1940 static void
1941 bnx2_send_heart_beat(struct bnx2 *bp)
1942 {
1943         u32 msg;
1944         u32 addr;
1945
1946         spin_lock(&bp->indirect_lock);
1947         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1948         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1949         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1950         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1951         spin_unlock(&bp->indirect_lock);
1952 }
1953
/* Handle a link-status event reported by the remote-PHY firmware:
 * decode the BNX2_LINK_STATUS word from shared memory into bp's
 * link/speed/duplex/flow-control state and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The xxHALF cases set duplex and deliberately fall
		 * through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced settings apply only at full duplex;
		 * otherwise take what the firmware negotiated.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (TP <-> fibre) resets the default link
		 * configuration for the new port type.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2030
2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034         u32 evt_code;
2035
2036         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037         switch (evt_code) {
2038                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2039                         bnx2_remote_phy_event(bp);
2040                         break;
2041                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042                 default:
2043                         bnx2_send_heart_beat(bp);
2044                         break;
2045         }
2046         return 0;
2047 }
2048
/* Configure the copper PHY for the requested link settings.
 * Caller holds phy_lock; it is dropped around the msleep() used to
 * force a visible link-down when changing forced speed/duplex.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg path: rebuild the advertisement registers and
		 * restart autoneg only if something actually changed.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path (10/100 only on copper here). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop the BH spinlock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2147
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153         if (bp->loopback == MAC_LOOPBACK)
2154                 return 0;
2155
2156         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157                 return (bnx2_setup_serdes_phy(bp, port));
2158         }
2159         else {
2160                 return (bnx2_setup_copper_phy(bp));
2161         }
2162 }
2163
/* One-time init of the 5709 SerDes PHY.  The 5709 exposes its MII
 * registers through block addressing: MII_BNX2_BLK_ADDR selects a
 * register block before each group of accesses, so the write order
 * below is significant.  Returns 0 (no failure paths).
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The standard IEEE registers live at an offset of 0x10 on this
	 * PHY; remap the cached register numbers accordingly.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable/disable 2.5G advertisement per board capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg mode (BAM) next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2213
/* One-time init of the 5708 SerDes PHY.  Uses BCM5708S_BLK_ADDR block
 * addressing, so write order matters.  Returns 0 (no failure paths).
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE register semantics in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM config,
	 * but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2271
/* One-time init of the 5706 SerDes PHY.  The raw 0x18/0x1c writes
 * access vendor shadow registers; the values come from Broadcom and
 * their bit meanings are undocumented here — do not alter them.
 * Returns 0 (no failure paths).
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 in MISC_GP_HW_CTL0 is an undocumented
	 * 5706-specific setting — confirm against the chip spec.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length settings for
		 * standard-size frames.
		 */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2309
/* One-time init of the copper PHY: optional CRC-fix and early-DAC
 * workarounds, extended packet length for jumbo MTU, and
 * ethernet@wirespeed.  The 0x15/0x17/0x18/0x1c writes access vendor
 * shadow/expansion registers with Broadcom-supplied values.
 * Returns 0 (no failure paths).
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Board-specific CRC workaround sequence (expansion registers via
	 * 0x17/0x15) — values from Broadcom, applied verbatim.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of DSP expand register 0x8) on
	 * boards that need it.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended-length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2361
2362
2363 static int
2364 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2365 __releases(&bp->phy_lock)
2366 __acquires(&bp->phy_lock)
2367 {
2368         u32 val;
2369         int rc = 0;
2370
2371         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2372         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2373
2374         bp->mii_bmcr = MII_BMCR;
2375         bp->mii_bmsr = MII_BMSR;
2376         bp->mii_bmsr1 = MII_BMSR;
2377         bp->mii_adv = MII_ADVERTISE;
2378         bp->mii_lpa = MII_LPA;
2379
2380         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2381
2382         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2383                 goto setup_phy;
2384
2385         bnx2_read_phy(bp, MII_PHYSID1, &val);
2386         bp->phy_id = val << 16;
2387         bnx2_read_phy(bp, MII_PHYSID2, &val);
2388         bp->phy_id |= val & 0xffff;
2389
2390         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2391                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2392                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2393                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2394                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2395                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2396                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2397         }
2398         else {
2399                 rc = bnx2_init_copper_phy(bp, reset_phy);
2400         }
2401
2402 setup_phy:
2403         if (!rc)
2404                 rc = bnx2_setup_phy(bp, bp->phy_port);
2405
2406         return rc;
2407 }
2408
2409 static int
2410 bnx2_set_mac_loopback(struct bnx2 *bp)
2411 {
2412         u32 mac_mode;
2413
2414         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2415         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2416         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2417         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2418         bp->link_up = 1;
2419         return 0;
2420 }
2421
2422 static int bnx2_test_link(struct bnx2 *);
2423
2424 static int
2425 bnx2_set_phy_loopback(struct bnx2 *bp)
2426 {
2427         u32 mac_mode;
2428         int rc, i;
2429
2430         spin_lock_bh(&bp->phy_lock);
2431         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2432                             BMCR_SPEED1000);
2433         spin_unlock_bh(&bp->phy_lock);
2434         if (rc)
2435                 return rc;
2436
2437         for (i = 0; i < 10; i++) {
2438                 if (bnx2_test_link(bp) == 0)
2439                         break;
2440                 msleep(100);
2441         }
2442
2443         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2444         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2445                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2446                       BNX2_EMAC_MODE_25G_MODE);
2447
2448         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2449         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2450         bp->link_up = 1;
2451         return 0;
2452 }
2453
/* Send a command to the bootcode through the driver mailbox and,
 * when @ack is set, sleep-wait for the firmware acknowledgement.
 * @msg_data carries the command code; the driver sequence number is
 * OR'd in here.  Returns 0 on success (or when no ack is requested,
 * or for WAIT0-class messages), -EBUSY on ack timeout, -EIO when the
 * firmware reports a non-OK status.  Must be called in sleepable
 * context when @ack is set.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* The firmware echoes the sequence number when done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget: success regardless of ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2498
/* Enable the 5709's host-memory-backed context and program the chip's
 * context page table with the DMA address of every pre-allocated
 * context block.  Returns 0 on success, -EBUSY if the chip does not
 * complete an operation in time, -ENOMEM if a context block is
 * missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12's meaning isn't named here — presumably a
	 * chip-specific enable bit; confirm against the 5709 register spec.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (2^BCM_PAGE_BITS bytes). */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll until the internal context memory init completes. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Each block must already be allocated; just clear it. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Load the 64-bit DMA address into the data registers,
		 * then request the chip to latch it into entry i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the write request is consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2546
2547 static void
2548 bnx2_init_context(struct bnx2 *bp)
2549 {
2550         u32 vcid;
2551
2552         vcid = 96;
2553         while (vcid) {
2554                 u32 vcid_addr, pcid_addr, offset;
2555                 int i;
2556
2557                 vcid--;
2558
2559                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2560                         u32 new_vcid;
2561
2562                         vcid_addr = GET_PCID_ADDR(vcid);
2563                         if (vcid & 0x8) {
2564                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2565                         }
2566                         else {
2567                                 new_vcid = vcid;
2568                         }
2569                         pcid_addr = GET_PCID_ADDR(new_vcid);
2570                 }
2571                 else {
2572                         vcid_addr = GET_CID_ADDR(vcid);
2573                         pcid_addr = vcid_addr;
2574                 }
2575
2576                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2577                         vcid_addr += (i << PHY_CTX_SHIFT);
2578                         pcid_addr += (i << PHY_CTX_SHIFT);
2579
2580                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2581                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2582
2583                         /* Zero out the context. */
2584                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2585                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2586                 }
2587         }
2588 }
2589
2590 static int
2591 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2592 {
2593         u16 *good_mbuf;
2594         u32 good_mbuf_cnt;
2595         u32 val;
2596
2597         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2598         if (good_mbuf == NULL) {
2599                 pr_err("Failed to allocate memory in %s\n", __func__);
2600                 return -ENOMEM;
2601         }
2602
2603         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2604                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2605
2606         good_mbuf_cnt = 0;
2607
2608         /* Allocate a bunch of mbufs and save the good ones in an array. */
2609         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2610         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2611                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2612                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2613
2614                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2615
2616                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2617
2618                 /* The addresses with Bit 9 set are bad memory blocks. */
2619                 if (!(val & (1 << 9))) {
2620                         good_mbuf[good_mbuf_cnt] = (u16) val;
2621                         good_mbuf_cnt++;
2622                 }
2623
2624                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2625         }
2626
2627         /* Free the good ones back to the mbuf pool thus discarding
2628          * all the bad ones. */
2629         while (good_mbuf_cnt) {
2630                 good_mbuf_cnt--;
2631
2632                 val = good_mbuf[good_mbuf_cnt];
2633                 val = (val << 9) | val | 1;
2634
2635                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2636         }
2637         kfree(good_mbuf);
2638         return 0;
2639 }
2640
2641 static void
2642 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2643 {
2644         u32 val;
2645
2646         val = (mac_addr[0] << 8) | mac_addr[1];
2647
2648         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2649
2650         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2651                 (mac_addr[4] << 8) | mac_addr[5];
2652
2653         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2654 }
2655
2656 static inline int
2657 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2658 {
2659         dma_addr_t mapping;
2660         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2661         struct rx_bd *rxbd =
2662                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2663         struct page *page = alloc_page(GFP_ATOMIC);
2664
2665         if (!page)
2666                 return -ENOMEM;
2667         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2668                                PCI_DMA_FROMDEVICE);
2669         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2670                 __free_page(page);
2671                 return -EIO;
2672         }
2673
2674         rx_pg->page = page;
2675         dma_unmap_addr_set(rx_pg, mapping, mapping);
2676         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2677         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2678         return 0;
2679 }
2680
2681 static void
2682 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2683 {
2684         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2685         struct page *page = rx_pg->page;
2686
2687         if (!page)
2688                 return;
2689
2690         pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2691                        PCI_DMA_FROMDEVICE);
2692
2693         __free_page(page);
2694         rx_pg->page = NULL;
2695 }
2696
2697 static inline int
2698 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2699 {
2700         struct sk_buff *skb;
2701         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2702         dma_addr_t mapping;
2703         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2704         unsigned long align;
2705
2706         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2707         if (skb == NULL) {
2708                 return -ENOMEM;
2709         }
2710
2711         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2712                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2713
2714         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2715                 PCI_DMA_FROMDEVICE);
2716         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2717                 dev_kfree_skb(skb);
2718                 return -EIO;
2719         }
2720
2721         rx_buf->skb = skb;
2722         rx_buf->desc = (struct l2_fhdr *) skb->data;
2723         dma_unmap_addr_set(rx_buf, mapping, mapping);
2724
2725         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2726         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2727
2728         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2729
2730         return 0;
2731 }
2732
2733 static int
2734 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2735 {
2736         struct status_block *sblk = bnapi->status_blk.msi;
2737         u32 new_link_state, old_link_state;
2738         int is_set = 1;
2739
2740         new_link_state = sblk->status_attn_bits & event;
2741         old_link_state = sblk->status_attn_bits_ack & event;
2742         if (new_link_state != old_link_state) {
2743                 if (new_link_state)
2744                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2745                 else
2746                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2747         } else
2748                 is_set = 0;
2749
2750         return is_set;
2751 }
2752
2753 static void
2754 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2755 {
2756         spin_lock(&bp->phy_lock);
2757
2758         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2759                 bnx2_set_link(bp);
2760         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2761                 bnx2_set_remote_link(bp);
2762
2763         spin_unlock(&bp->phy_lock);
2764
2765 }
2766
/* Read the hardware TX consumer index from the status block.  When
 * the index lands on the last slot of a ring page it is advanced past
 * it (that slot is skipped — presumably it holds the next-page chain
 * BD; confirm against the ring setup code).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2780
/* Reclaim TX buffers the hardware has finished sending, at most
 * @budget packets per call.  Runs in NAPI context.  Returns the
 * number of packets reclaimed, and wakes the tx queue when enough
 * descriptors have been freed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance owns the tx queue of the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't free the skb until every BD of the GSO
			 * packet (head + all frags) has completed; the
			 * signed 16-bit subtraction handles index
			 * wrap-around.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up completions that arrived
		 * while we were reclaiming.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2871
/* Recycle @count pages from the rx page ring's consumer side back to
 * the producer side (moving page pointers, DMA mappings and BD
 * addresses).  If @skb is non-NULL, its last fragment page could not
 * be replaced: that page is taken back into the ring and the skb is
 * freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* When prod == cons the page is already in place;
		 * otherwise move page, mapping and BD address over.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2927
/* Recycle an rx buffer: move the skb, its DMA mapping and its BD
 * address from ring slot @cons to slot @prod so the hardware can
 * reuse the buffer without a fresh allocation.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area (the only part we synced for the CPU)
	 * back to the device before the buffer is reused.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: mapping and BD address are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2958
/* Finish receiving a packet into @skb: refill the ring slot it came
 * from, unmap the buffer, and for split/jumbo frames (hdr_len != 0)
 * attach the remainder of the packet from the page ring as skb
 * fragments.  @ring_idx packs (consumer << 16) | producer.  Returns 0
 * on success or a negative errno; on failure the skb and any pages
 * are recycled back to their rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill the slot: recycle the old skb and
		 * the pages the split frame would have consumed.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* +4: the caller already subtracted the 4-byte
			 * trailing CRC from len.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear frame: everything is in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size counts the raw bytes (incl. CRC) that live
		 * in the page ring beyond the hdr_len head portion.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* This last page holds nothing but (part
				 * of) the CRC: trim the tail from the
				 * skb and recycle the remaining pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Refill failed: roll back the ring
				 * indices and recycle skb + pages.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3057
/* Read the hardware RX consumer index from the status block; like
 * bnx2_get_hw_tx_cons(), indices landing on the last slot of a ring
 * page are advanced past it.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3071
/* RX NAPI worker: process up to @budget completed receive descriptors,
 * passing good packets up the stack and recycling buffers on error.
 * Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;
	struct pci_dev *pdev = bp->pdev;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Only prefetch the next descriptor when this platform
		 * has no sync_single_for_cpu op (so the sync below is
		 * effectively free).
		 */
		if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
			next_rx_buf =
				&rxr->rx_buf_ring[
					RX_RING_IDX(NEXT_RX_BD(sw_cons))];
			prefetch(next_rx_buf->desc);
		}
		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for now; the full buffer
		 * is unmapped later if we keep this skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip writes an l2_fhdr at the start of the buffer,
		 * ahead of the packet data.
		 */
		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			/* Jumbo frame: the tail lives in the page ring. */
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* On any receive error, recycle the buffer (and any
		 * page-ring pages) rather than freeing it.
		 */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailing frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and give
			 * the original buffer back to the ring.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group to hand the tag to:
				 * re-insert the 802.1Q header into the
				 * packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they are 802.1Q tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when rx_csum is on
		 * and no TCP/UDP checksum error was flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3255
3256 /* MSI ISR - The only difference between this and the INTx ISR
3257  * is that the MSI interrupt is always serviced.
3258  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask further interrupts until NAPI poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3278
/* One-shot MSI handler: unlike bnx2_msi(), no ack/mask register write
 * is done before scheduling NAPI (presumably the host coalescing block
 * masks itself in one-shot mode — confirm against the HC setup code).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3295
/* INTx (shared line) interrupt handler.  Returns IRQ_NONE when the
 * interrupt was not ours, IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are about to service so the
	 * stale-interrupt check above works on the next interrupt.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3334
3335 static inline int
3336 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3337 {
3338         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3339         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3340
3341         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3342             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3343                 return 1;
3344         return 0;
3345 }
3346
/* Slow-path attention events we care about: link changes and timer
 * aborts.  Pending events show as a mismatch between the attn bits
 * and their ack copy in the status block.
 */
#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
                                 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return nonzero if any rx/tx, CNIC, or attention work is pending. */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;

        if (bnx2_has_fast_work(bnapi))
                return 1;

#ifdef BCM_CNIC
        /* CNIC work is pending when its tag lags the status index. */
        if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
                return 1;
#endif

        /* Un-acked attention events also count as work. */
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;

        return 0;
}
3369
/* Workaround for missed MSIs: if work has been pending across two
 * consecutive idle checks without last_status_idx advancing, the MSI
 * was likely lost.  Toggle MSI off/on in PCICFG and invoke the MSI
 * handler by hand to kick NAPI.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                /* Nothing to do if MSI isn't enabled. */
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                /* No progress since the previous check: assume a lost
                 * MSI, pulse the enable bit and service it manually.
                 */
                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember where we were for the next idle check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3391
#ifdef BCM_CNIC
/* Hand the status block to the CNIC (iSCSI/RDMA offload) driver's
 * handler, if one is registered.  cnic_ops is RCU-protected so the
 * CNIC module can unregister safely while we poll.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                /* cnic_tag records how far the CNIC handler has
                 * consumed; bnx2_has_work() compares it against
                 * status_idx to detect pending CNIC work.
                 */
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3408
/* Service link-related attention events from the status block.  An
 * event is pending when the attn bits differ from their ack copy.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back to flush the posted write. */
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3428
3429 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3430                           int work_done, int budget)
3431 {
3432         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3433         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3434
3435         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3436                 bnx2_tx_int(bp, bnapi, 0);
3437
3438         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3439                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3440
3441         return work_done;
3442 }
3443
/* NAPI poll routine for MSI-X vectors: fast-path work only (no link
 * or CNIC handling, which belong to vector 0's bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                /* Budget exhausted: stay scheduled, NAPI will re-poll. */
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        /* Ack with the latest index, re-enabling this
                         * vector's interrupt.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3470
/* Main NAPI poll routine (INTx/MSI, and MSI-X vector 0): services
 * link attention, fast-path rx/tx, and CNIC work.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                /* Budget exhausted: stay scheduled, NAPI will re-poll. */
                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        /* MSI/MSI-X: a single ack write re-enables the
                         * interrupt with the latest index.
                         */
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first update the index with interrupts
                         * still masked, then write again without the
                         * mask bit to re-enable them.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3519
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC rx mode (promiscuous/VLAN-keep bits), the multicast
 * hash filter, the unicast match filters, and the RPM sort rules to
 * match the net_device's current flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits we manage cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in hardware only when no vlan group is
         * registered (otherwise stripping is wanted).
         */
        if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: fill the hash filter with 1s. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address into one bit of the 256-bit filter:
                 * low 8 bits of the CRC select register (top 3) and bit
                 * position (low 5).
                 */
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many unicast addresses for the match filters: fall back
         * to promiscuous.
         */
        if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into to the match filter list */
                i = 0;
                netdev_for_each_uc_addr(ha, dev) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        /* Only touch the EMAC rx mode register when it changes. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Program sort rules: clear, load, then enable. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3612
3613 static int __devinit
3614 check_fw_section(const struct firmware *fw,
3615                  const struct bnx2_fw_file_section *section,
3616                  u32 alignment, bool non_empty)
3617 {
3618         u32 offset = be32_to_cpu(section->offset);
3619         u32 len = be32_to_cpu(section->len);
3620
3621         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3622                 return -EINVAL;
3623         if ((non_empty && len == 0) || len > fw->size - offset ||
3624             len & (alignment - 1))
3625                 return -EINVAL;
3626         return 0;
3627 }
3628
3629 static int __devinit
3630 check_mips_fw_entry(const struct firmware *fw,
3631                     const struct bnx2_mips_fw_file_entry *entry)
3632 {
3633         if (check_fw_section(fw, &entry->text, 4, true) ||
3634             check_fw_section(fw, &entry->data, 4, false) ||
3635             check_fw_section(fw, &entry->rodata, 4, false))
3636                 return -EINVAL;
3637         return 0;
3638 }
3639
/* Pick the MIPS and RV2P firmware files for this chip, request them
 * from userspace, and sanity-check their section tables.
 *
 * NOTE(review): on the error paths below, firmware already loaded into
 * bp->mips_firmware / bp->rv2p_firmware is not released here —
 * presumably the caller's error path calls release_firmware(); verify
 * against the probe function.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        /* 5709 uses its own firmware; A0/A1 steppings need a special
         * RV2P build.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5709_A1))
                        rv2p_fw_file = FW_RV2P_FILE_09_Ax;
                else
                        rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
                return rc;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
                return rc;
        }
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        /* Validate every processor's section table before anything is
         * loaded into the chip.
         */
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
                return -EINVAL;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
                return -EINVAL;
        }

        return 0;
}
3691
3692 static u32
3693 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3694 {
3695         switch (idx) {
3696         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3697                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3698                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3699                 break;
3700         }
3701         return rv2p_code;
3702 }
3703
/* Load one RV2P processor's firmware: write each 64-bit instruction
 * through the INSTR_HIGH/INSTR_LOW register pair, apply fixups, then
 * reset the processor (it is un-stalled later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Each processor has its own address/command register. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Write instructions 8 bytes (one instruction) at a time; the
         * write to 'addr' commits the pair at instruction index i/8.
         */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Re-walk the image and apply up to 8 fixup patches.  Each
         * fixup location indexes a 32-bit word; the patched 64-bit
         * instruction is rewritten in place.
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3763
3764 static int
3765 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3766             const struct bnx2_mips_fw_file_entry *fw_entry)
3767 {
3768         u32 addr, len, file_offset;
3769         __be32 *data;
3770         u32 offset;
3771         u32 val;
3772
3773         /* Halt the CPU. */
3774         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3775         val |= cpu_reg->mode_value_halt;
3776         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3777         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3778
3779         /* Load the Text area. */
3780         addr = be32_to_cpu(fw_entry->text.addr);
3781         len = be32_to_cpu(fw_entry->text.len);
3782         file_offset = be32_to_cpu(fw_entry->text.offset);
3783         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3784
3785         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3786         if (len) {
3787                 int j;
3788
3789                 for (j = 0; j < (len / 4); j++, offset += 4)
3790                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3791         }
3792
3793         /* Load the Data area. */
3794         addr = be32_to_cpu(fw_entry->data.addr);
3795         len = be32_to_cpu(fw_entry->data.len);
3796         file_offset = be32_to_cpu(fw_entry->data.offset);
3797         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3798
3799         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3800         if (len) {
3801                 int j;
3802
3803                 for (j = 0; j < (len / 4); j++, offset += 4)
3804                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3805         }
3806
3807         /* Load the Read-Only area. */
3808         addr = be32_to_cpu(fw_entry->rodata.addr);
3809         len = be32_to_cpu(fw_entry->rodata.len);
3810         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3811         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3812
3813         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3814         if (len) {
3815                 int j;
3816
3817                 for (j = 0; j < (len / 4); j++, offset += 4)
3818                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3819         }
3820
3821         /* Clear the pre-fetch instruction. */
3822         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3823
3824         val = be32_to_cpu(fw_entry->start_addr);
3825         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3826
3827         /* Start the CPU. */
3828         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3829         val &= ~cpu_reg->mode_value_halt;
3830         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3831         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3832
3833         return 0;
3834 }
3835
/* Load firmware into all on-chip processors: both RV2P engines, then
 * the RX, TX, TX patch-up, completion, and command MIPS processors.
 * Returns the first load_cpu_fw() error, or 0 on success.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        const struct bnx2_mips_fw_file *mips_fw =
                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        const struct bnx2_rv2p_fw_file *rv2p_fw =
                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        int rc;

        /* Initialize the RV2P processor. */
        /* (load_rv2p_fw's return value is ignored; as written it
         * always returns 0.)
         */
        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

        /* Initialize the RX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
        return rc;
}
3875
/* Transition the device between PCI power states.  D0 wakes the chip
 * and clears wake-on-LAN receive modes; D3hot optionally arms WoL
 * (magic packet + multicast receive) before cutting power.  Returns 0
 * on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Force state to D0 and clear any pending PME status. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Clear magic/ACPI packet latches and disable magic
                 * packet detection now that we are awake.
                 */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Temporarily force 10/100 autoneg on copper
                         * for low-power WoL link, restoring the user's
                         * settings afterwards.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Sort rules: broadcast + multicast accept. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the bootcode we are suspending (with or without
                 * WoL) unless WoL is not supported at all.
                 */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                /* Set the PCI power state bits (3 == D3hot).  On
                 * 5706 A0/A1 only enter D3hot when WoL is armed.
                 */
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
4013
4014 static int
4015 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4016 {
4017         u32 val;
4018         int j;
4019
4020         /* Request access to the flash interface. */
4021         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4022         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4023                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4024                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4025                         break;
4026
4027                 udelay(5);
4028         }
4029
4030         if (j >= NVRAM_TIMEOUT_COUNT)
4031                 return -EBUSY;
4032
4033         return 0;
4034 }
4035
4036 static int
4037 bnx2_release_nvram_lock(struct bnx2 *bp)
4038 {
4039         int j;
4040         u32 val;
4041
4042         /* Relinquish nvram interface. */
4043         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4044
4045         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4046                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4047                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4048                         break;
4049
4050                 udelay(5);
4051         }
4052
4053         if (j >= NVRAM_TIMEOUT_COUNT)
4054                 return -EBUSY;
4055
4056         return 0;
4057 }
4058
4059
/* Enable NVRAM writes: set the PCI write-enable bit, and for flash
 * parts that need it, issue a WREN command and wait for completion.
 * Returns 0 on success, -EBUSY if the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (bp->flash_info->flags & BNX2_NV_WREN) {
                int j;

                /* Clear DONE, then issue the write-enable command. */
                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                /* Poll for command completion. */
                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}
4088
4089 static void
4090 bnx2_disable_nvram_write(struct bnx2 *bp)
4091 {
4092         u32 val;
4093
4094         val = REG_RD(bp, BNX2_MISC_CFG);
4095         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4096 }
4097
4098
4099 static void
4100 bnx2_enable_nvram_access(struct bnx2 *bp)
4101 {
4102         u32 val;
4103
4104         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4105         /* Enable both bits, even on read. */
4106         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4107                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4108 }
4109
4110 static void
4111 bnx2_disable_nvram_access(struct bnx2 *bp)
4112 {
4113         u32 val;
4114
4115         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4116         /* Disable both bits, even after read. */
4117         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4118                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4119                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4120 }
4121
/* Erase one flash page at 'offset'.  Buffered flash parts need no
 * erase.  Returns 0 on success (or no-op), -EBUSY if the erase
 * command times out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4161
/* Read one 32-bit word of NVRAM at @offset into @ret_val, in the
 * byte order stored in flash (big-endian).  @cmd_flags carries the
 * FIRST/LAST framing bits for multi-dword transactions.  The caller
 * must already hold the NVRAM lock and have enabled NVRAM access.
 *
 * Returns 0 on success, -EBUSY if the controller does not signal
 * completion within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Copy through a local __be32 so the store to
			 * ret_val needs no alignment. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4205
4206
/* Write one 32-bit word (4 bytes at @val, flash byte order) to NVRAM
 * at @offset.  @cmd_flags carries the FIRST/LAST framing bits.  The
 * caller must hold the NVRAM lock and have write access enabled.
 *
 * Returns 0 on success, -EBUSY on completion-poll timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy tolerates an unaligned source buffer. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4250
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.  On non-5709
 * chips the part is found by matching the NVM_CFG1 strapping bits
 * against flash_table[]; the 5709 has a single fixed descriptor.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 30 of NVM_CFG1 presumably indicates that the
	 * interface was already reconfigured by an earlier pass — confirm
	 * against the NetXtreme II register reference. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop broke out: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size published by the bootcode in shared memory;
	 * fall back to the table's default total size. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4333
4334 static int
4335 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4336                 int buf_size)
4337 {
4338         int rc = 0;
4339         u32 cmd_flags, offset32, len32, extra;
4340
4341         if (buf_size == 0)
4342                 return 0;
4343
4344         /* Request access to the flash interface. */
4345         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4346                 return rc;
4347
4348         /* Enable access to flash interface */
4349         bnx2_enable_nvram_access(bp);
4350
4351         len32 = buf_size;
4352         offset32 = offset;
4353         extra = 0;
4354
4355         cmd_flags = 0;
4356
4357         if (offset32 & 3) {
4358                 u8 buf[4];
4359                 u32 pre_len;
4360
4361                 offset32 &= ~3;
4362                 pre_len = 4 - (offset & 3);
4363
4364                 if (pre_len >= len32) {
4365                         pre_len = len32;
4366                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4367                                     BNX2_NVM_COMMAND_LAST;
4368                 }
4369                 else {
4370                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4371                 }
4372
4373                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4374
4375                 if (rc)
4376                         return rc;
4377
4378                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4379
4380                 offset32 += 4;
4381                 ret_buf += pre_len;
4382                 len32 -= pre_len;
4383         }
4384         if (len32 & 3) {
4385                 extra = 4 - (len32 & 3);
4386                 len32 = (len32 + 4) & ~3;
4387         }
4388
4389         if (len32 == 4) {
4390                 u8 buf[4];
4391
4392                 if (cmd_flags)
4393                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4394                 else
4395                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4396                                     BNX2_NVM_COMMAND_LAST;
4397
4398                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4399
4400                 memcpy(ret_buf, buf, 4 - extra);
4401         }
4402         else if (len32 > 0) {
4403                 u8 buf[4];
4404
4405                 /* Read the first word. */
4406                 if (cmd_flags)
4407                         cmd_flags = 0;
4408                 else
4409                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4410
4411                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4412
4413                 /* Advance to the next dword. */
4414                 offset32 += 4;
4415                 ret_buf += 4;
4416                 len32 -= 4;
4417
4418                 while (len32 > 4 && rc == 0) {
4419                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4420
4421                         /* Advance to the next dword. */
4422                         offset32 += 4;
4423                         ret_buf += 4;
4424                         len32 -= 4;
4425                 }
4426
4427                 if (rc)
4428                         return rc;
4429
4430                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4431                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4432
4433                 memcpy(ret_buf, buf, 4 - extra);
4434         }
4435
4436         /* Disable access to flash interface */
4437         bnx2_disable_nvram_access(bp);
4438
4439         bnx2_release_nvram_lock(bp);
4440
4441         return rc;
4442 }
4443
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned starts/ends are handled by first reading the surrounding
 * dwords and merging the caller's data into an aligned bounce buffer
 * (align_buf).  Non-buffered flash parts require a full page
 * read-erase-rewrite cycle, staged through flash_buffer; buffered
 * parts are written directly.  The NVRAM lock is acquired and released
 * once per page so other agents can interleave access.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: save the dword we will partially overwrite. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: save the trailing dword as well. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge saved edge bytes and caller data into one aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered parts need a page-sized staging buffer for the
	 * read-erase-rewrite cycle.
	 * NOTE(review): 264 presumably covers the largest page size in
	 * flash_table — confirm against the table definitions. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the page, or of
			 * the whole transfer for buffered parts. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe unconditionally. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4623
4624 static void
4625 bnx2_init_fw_cap(struct bnx2 *bp)
4626 {
4627         u32 val, sig = 0;
4628
4629         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4630         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4631
4632         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4633                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4634
4635         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4636         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4637                 return;
4638
4639         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4640                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4641                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4642         }
4643
4644         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4645             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4646                 u32 link;
4647
4648                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4649
4650                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4651                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4652                         bp->phy_port = PORT_FIBRE;
4653                 else
4654                         bp->phy_port = PORT_TP;
4655
4656                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4657                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4658         }
4659
4660         if (netif_running(bp->dev) && sig)
4661                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4662 }
4663
/* Point two GRC windows at the MSI-X vector table and pending-bit
 * array so the host can reach them through the register BAR.  The
 * window mode must be switched to separate windows first.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4672
/* Perform a coordinated soft reset of the chip.  @reset_code is the
 * BNX2_DRV_MSG_CODE_* reason passed to the bootcode via the WAIT0/WAIT1
 * handshakes.  The reset mechanism differs by chip: the 5709 uses the
 * MISC_COMMAND register, older chips the CORE_RST_REQ bit in PCICFG.
 * After reset, firmware capabilities are re-read and chip-rev-specific
 * workarounds are re-applied.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the posted write before the delay. */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset through MISC_COMMAND, then restore the
		 * window/swap config via PCI config space (the register
		 * window itself is reset). */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the PHY port selection may have
	 * changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4782
4783 static int
4784 bnx2_init_chip(struct bnx2 *bp)
4785 {
4786         u32 val, mtu;
4787         int rc, i;
4788
4789         /* Make sure the interrupt is not active. */
4790         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4791
4792         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4793               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4794 #ifdef __BIG_ENDIAN
4795               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4796 #endif
4797               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4798               DMA_READ_CHANS << 12 |
4799               DMA_WRITE_CHANS << 16;
4800
4801         val |= (0x2 << 20) | (1 << 11);
4802
4803         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4804                 val |= (1 << 23);
4805
4806         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4807             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4808                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4809
4810         REG_WR(bp, BNX2_DMA_CONFIG, val);
4811
4812         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4813                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4814                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4815                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4816         }
4817
4818         if (bp->flags & BNX2_FLAG_PCIX) {
4819                 u16 val16;
4820
4821                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4822                                      &val16);
4823                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4824                                       val16 & ~PCI_X_CMD_ERO);
4825         }
4826
4827         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4828                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4829                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4830                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4831
4832         /* Initialize context mapping and zero out the quick contexts.  The
4833          * context block must have already been enabled. */
4834         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4835                 rc = bnx2_init_5709_context(bp);
4836                 if (rc)
4837                         return rc;
4838         } else
4839                 bnx2_init_context(bp);
4840
4841         if ((rc = bnx2_init_cpus(bp)) != 0)
4842                 return rc;
4843
4844         bnx2_init_nvram(bp);
4845
4846         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4847
4848         val = REG_RD(bp, BNX2_MQ_CONFIG);
4849         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4850         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4851         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4852                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4853                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4854                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4855         }
4856
4857         REG_WR(bp, BNX2_MQ_CONFIG, val);
4858
4859         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4860         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4861         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4862
4863         val = (BCM_PAGE_BITS - 8) << 24;
4864         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4865
4866         /* Configure page size. */
4867         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4868         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4869         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4870         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4871
4872         val = bp->mac_addr[0] +
4873               (bp->mac_addr[1] << 8) +
4874               (bp->mac_addr[2] << 16) +
4875               bp->mac_addr[3] +
4876               (bp->mac_addr[4] << 8) +
4877               (bp->mac_addr[5] << 16);
4878         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4879
4880         /* Program the MTU.  Also include 4 bytes for CRC32. */
4881         mtu = bp->dev->mtu;
4882         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4883         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4884                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4885         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4886
4887         if (mtu < 1500)
4888                 mtu = 1500;
4889
4890         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4891         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4892         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4893
4894         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4895         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4896                 bp->bnx2_napi[i].last_status_idx = 0;
4897
4898         bp->idle_chk_status_idx = 0xffff;
4899
4900         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4901
4902         /* Set up how to generate a link change interrupt. */
4903         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4904
4905         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4906                (u64) bp->status_blk_mapping & 0xffffffff);
4907         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4908
4909         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4910                (u64) bp->stats_blk_mapping & 0xffffffff);
4911         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4912                (u64) bp->stats_blk_mapping >> 32);
4913
4914         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4915                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4916
4917         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4918                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4919
4920         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4921                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4922
4923         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4924
4925         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4926
4927         REG_WR(bp, BNX2_HC_COM_TICKS,
4928                (bp->com_ticks_int << 16) | bp->com_ticks);
4929
4930         REG_WR(bp, BNX2_HC_CMD_TICKS,
4931                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4932
4933         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4934                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4935         else
4936                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4937         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4938
4939         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4940                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4941         else {
4942                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4943                       BNX2_HC_CONFIG_COLLECT_STATS;
4944         }
4945
4946         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4947                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4948                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4949
4950                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4951         }
4952
4953         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4954                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4955
4956         REG_WR(bp, BNX2_HC_CONFIG, val);
4957
4958         for (i = 1; i < bp->irq_nvecs; i++) {
4959                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4960                            BNX2_HC_SB_CONFIG_1;
4961
4962                 REG_WR(bp, base,
4963                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4964                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4965                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4966
4967                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4968                         (bp->tx_quick_cons_trip_int << 16) |
4969                          bp->tx_quick_cons_trip);
4970
4971                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4972                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4973
4974                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4975                        (bp->rx_quick_cons_trip_int << 16) |
4976                         bp->rx_quick_cons_trip);
4977
4978                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4979                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4980         }
4981
4982         /* Clear internal stats counters. */
4983         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4984
4985         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4986
4987         /* Initialize the receive filter. */
4988         bnx2_set_rx_mode(bp->dev);
4989
4990         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4991                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4992                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4993                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4994         }
4995         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4996                           1, 0);
4997
4998         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4999         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5000
5001         udelay(20);
5002
5003         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5004
5005         return rc;
5006 }
5007
5008 static void
5009 bnx2_clear_ring_states(struct bnx2 *bp)
5010 {
5011         struct bnx2_napi *bnapi;
5012         struct bnx2_tx_ring_info *txr;
5013         struct bnx2_rx_ring_info *rxr;
5014         int i;
5015
5016         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5017                 bnapi = &bp->bnx2_napi[i];
5018                 txr = &bnapi->tx_ring;
5019                 rxr = &bnapi->rx_ring;
5020
5021                 txr->tx_cons = 0;
5022                 txr->hw_tx_cons = 0;
5023                 rxr->rx_prod_bseq = 0;
5024                 rxr->rx_prod = 0;
5025                 rxr->rx_cons = 0;
5026                 rxr->rx_pg_prod = 0;
5027                 rxr->rx_pg_cons = 0;
5028         }
5029 }
5030
5031 static void
5032 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5033 {
5034         u32 val, offset0, offset1, offset2, offset3;
5035         u32 cid_addr = GET_CID_ADDR(cid);
5036
5037         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5038                 offset0 = BNX2_L2CTX_TYPE_XI;
5039                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5040                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5041                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5042         } else {
5043                 offset0 = BNX2_L2CTX_TYPE;
5044                 offset1 = BNX2_L2CTX_CMD_TYPE;
5045                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5046                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5047         }
5048         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5049         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5050
5051         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5052         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5053
5054         val = (u64) txr->tx_desc_mapping >> 32;
5055         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5056
5057         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5058         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5059 }
5060
5061 static void
5062 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5063 {
5064         struct tx_bd *txbd;
5065         u32 cid = TX_CID;
5066         struct bnx2_napi *bnapi;
5067         struct bnx2_tx_ring_info *txr;
5068
5069         bnapi = &bp->bnx2_napi[ring_num];
5070         txr = &bnapi->tx_ring;
5071
5072         if (ring_num == 0)
5073                 cid = TX_CID;
5074         else
5075                 cid = TX_TSS_CID + ring_num - 1;
5076
5077         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5078
5079         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5080
5081         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5082         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5083
5084         txr->tx_prod = 0;
5085         txr->tx_prod_bseq = 0;
5086
5087         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5088         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5089
5090         bnx2_init_tx_context(bp, cid, txr);
5091 }
5092
5093 static void
5094 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5095                      int num_rings)
5096 {
5097         int i;
5098         struct rx_bd *rxbd;
5099
5100         for (i = 0; i < num_rings; i++) {
5101                 int j;
5102
5103                 rxbd = &rx_ring[i][0];
5104                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5105                         rxbd->rx_bd_len = buf_size;
5106                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5107                 }
5108                 if (i == (num_rings - 1))
5109                         j = 0;
5110                 else
5111                         j = i + 1;
5112                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5113                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5114         }
5115 }
5116
/* Set up RX ring @ring_num: build the BD chain(s), program the ring's
 * context in the chip, and pre-fill the ring (and the page ring, when
 * jumbo pages are enabled) with receive buffers, then publish the
 * producer indices to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; the others use RSS CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* NOTE(review): 5709-only MQ mapping tweak -- ARM bit is
		 * set on top of the existing register value.
		 */
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page-buffer size 0 first; overwritten below when the page
	 * (jumbo) ring is actually in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Bus address of the first page-BD chain page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Bus address of the first normal-BD chain page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill only warns. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs; partial fill only warns. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the buffers we just posted. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5202
/* Initialize every TX and RX ring; when multiple rings are enabled,
 * also program the TSS configuration, the RSS indirection table in
 * RXP scratch memory, and enable RSS hashing.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table with values cycling through
		 * 0..num_rx_rings-2, packing four one-byte entries per
		 * 32-bit indirect write.  cpu_to_be32() fixes the byte
		 * order for the chip -- NOTE(review): presumably so that
		 * tbl[0] lands in the first table byte on-chip; confirm
		 * against the RXP firmware's expectations.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5247
5248 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5249 {
5250         u32 max, num_rings = 1;
5251
5252         while (ring_size > MAX_RX_DESC_CNT) {
5253                 ring_size -= MAX_RX_DESC_CNT;
5254                 num_rings++;
5255         }
5256         /* round to next power of 2 */
5257         max = max_size;
5258         while ((max & num_rings) == 0)
5259                 max >>= 1;
5260
5261         if (num_rings != max)
5262                 max <<= 1;
5263
5264         return max;
5265 }
5266
5267 static void
5268 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5269 {
5270         u32 rx_size, rx_space, jumbo_size;
5271
5272         /* 8 for CRC and VLAN */
5273         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5274
5275         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5276                 sizeof(struct skb_shared_info);
5277
5278         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5279         bp->rx_pg_ring_size = 0;
5280         bp->rx_max_pg_ring = 0;
5281         bp->rx_max_pg_ring_idx = 0;
5282         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5283                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5284
5285                 jumbo_size = size * pages;
5286                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5287                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5288
5289                 bp->rx_pg_ring_size = jumbo_size;
5290                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5291                                                         MAX_RX_PG_RINGS);
5292                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5293                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5294                 bp->rx_copy_thresh = 0;
5295         }
5296
5297         bp->rx_buf_use_size = rx_size;
5298         /* hw alignment */
5299         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5300         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5301         bp->rx_ring_size = size;
5302         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5303         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5304 }
5305
/* Free every skb still owned by the TX rings, unmapping the linear
 * part and each page fragment before releasing the skb.  Note that
 * the outer index j is advanced inside the loop body: past the head
 * BD and then past one BD per fragment.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;	/* ring was never allocated */

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Unmap the linear (head) portion first ... */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* ... then each page fragment, which occupies
			 * the following BDs (with ring wrap-around).
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5349
5350 static void
5351 bnx2_free_rx_skbs(struct bnx2 *bp)
5352 {
5353         int i;
5354
5355         for (i = 0; i < bp->num_rx_rings; i++) {
5356                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5357                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5358                 int j;
5359
5360                 if (rxr->rx_buf_ring == NULL)
5361                         return;
5362
5363                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5364                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5365                         struct sk_buff *skb = rx_buf->skb;
5366
5367                         if (skb == NULL)
5368                                 continue;
5369
5370                         pci_unmap_single(bp->pdev,
5371                                          dma_unmap_addr(rx_buf, mapping),
5372                                          bp->rx_buf_use_size,
5373                                          PCI_DMA_FROMDEVICE);
5374
5375                         rx_buf->skb = NULL;
5376
5377                         dev_kfree_skb(skb);
5378                 }
5379                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5380                         bnx2_free_rx_page(bp, rxr, j);
5381         }
5382 }
5383
/* Release all driver-owned TX and RX buffers (used on reset and
 * shutdown paths).
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5390
5391 static int
5392 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5393 {
5394         int rc;
5395
5396         rc = bnx2_reset_chip(bp, reset_code);
5397         bnx2_free_skbs(bp);
5398         if (rc)
5399                 return rc;
5400
5401         if ((rc = bnx2_init_chip(bp)) != 0)
5402                 return rc;
5403
5404         bnx2_init_all_rings(bp);
5405         return 0;
5406 }
5407
5408 static int
5409 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5410 {
5411         int rc;
5412
5413         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5414                 return rc;
5415
5416         spin_lock_bh(&bp->phy_lock);
5417         bnx2_init_phy(bp, reset_phy);
5418         bnx2_set_link(bp);
5419         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5420                 bnx2_remote_phy_event(bp);
5421         spin_unlock_bh(&bp->phy_lock);
5422         return 0;
5423 }
5424
5425 static int
5426 bnx2_shutdown_chip(struct bnx2 *bp)
5427 {
5428         u32 reset_code;
5429
5430         if (bp->flags & BNX2_FLAG_NO_WOL)
5431                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5432         else if (bp->wol)
5433                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5434         else
5435                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5436
5437         return bnx2_reset_chip(bp, reset_code);
5438 }
5439
/* Self-test: walk a table of registers and verify that the bits in
 * rw_mask are writable (accept 0 and all-ones writes) and the bits in
 * ro_mask are read-only (retain their value across those writes).
 * Each register is restored afterwards.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on the 5709.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits
		 * must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits
		 * must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5610
5611 static int
5612 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5613 {
5614         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5615                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5616         int i;
5617
5618         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5619                 u32 offset;
5620
5621                 for (offset = 0; offset < size; offset += 4) {
5622
5623                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5624
5625                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5626                                 test_pattern[i]) {
5627                                 return -ENODEV;
5628                         }
5629                 }
5630         }
5631         return 0;
5632 }
5633
5634 static int
5635 bnx2_test_memory(struct bnx2 *bp)
5636 {
5637         int ret = 0;
5638         int i;
5639         static struct mem_entry {
5640                 u32   offset;
5641                 u32   len;
5642         } mem_tbl_5706[] = {
5643                 { 0x60000,  0x4000 },
5644                 { 0xa0000,  0x3000 },
5645                 { 0xe0000,  0x4000 },
5646                 { 0x120000, 0x4000 },
5647                 { 0x1a0000, 0x4000 },
5648                 { 0x160000, 0x4000 },
5649                 { 0xffffffff, 0    },
5650         },
5651         mem_tbl_5709[] = {
5652                 { 0x60000,  0x4000 },
5653                 { 0xa0000,  0x3000 },
5654                 { 0xe0000,  0x4000 },
5655                 { 0x120000, 0x4000 },
5656                 { 0x1a0000, 0x4000 },
5657                 { 0xffffffff, 0    },
5658         };
5659         struct mem_entry *mem_tbl;
5660
5661         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5662                 mem_tbl = mem_tbl_5709;
5663         else
5664                 mem_tbl = mem_tbl_5706;
5665
5666         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5667                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5668                         mem_tbl[i].len)) != 0) {
5669                         return ret;
5670                 }
5671         }
5672
5673         return ret;
5674 }
5675
5676 #define BNX2_MAC_LOOPBACK       0
5677 #define BNX2_PHY_LOOPBACK       1
5678
/* Internal loopback self-test: transmit one frame with the MAC (or
 * PHY) looped back on itself and verify the same bytes arrive on the
 * first RX ring.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM/-EIO on allocation/DMA-mapping failure, and -ENODEV when
 * the frame is not received intact.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;	/* no local PHY to loop back on */

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC address as destination,
	 * eight zero bytes, then a counting byte pattern as payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce (no interrupt) so the status block indices
	 * are up to date before sampling the RX consumer.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX BD describing the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the TX/RX completions become visible. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The chip must have consumed the frame we posted. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly one new frame must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged with any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus 4-byte CRC) and payload must match the frame
	 * we transmitted.
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5810
/* Result bits for bnx2_test_loopback(); 0 means both tests passed. */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5815
5816 static int
5817 bnx2_test_loopback(struct bnx2 *bp)
5818 {
5819         int rc = 0;
5820
5821         if (!netif_running(bp->dev))
5822                 return BNX2_LOOPBACK_FAILED;
5823
5824         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5825         spin_lock_bh(&bp->phy_lock);
5826         bnx2_init_phy(bp, 1);
5827         spin_unlock_bh(&bp->phy_lock);
5828         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5829                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5830         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5831                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5832         return rc;
5833 }
5834
/* Size of the NVRAM region verified by bnx2_test_nvram(). */
#define NVRAM_SIZE 0x200
/* Standard CRC-32 residue: a block that includes its own checksum
 * CRCs to this well-known constant. */
#define CRC32_RESIDUAL 0xdebb20e3
5837
5838 static int
5839 bnx2_test_nvram(struct bnx2 *bp)
5840 {
5841         __be32 buf[NVRAM_SIZE / 4];
5842         u8 *data = (u8 *) buf;
5843         int rc = 0;
5844         u32 magic, csum;
5845
5846         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5847                 goto test_nvram_done;
5848
5849         magic = be32_to_cpu(buf[0]);
5850         if (magic != 0x669955aa) {
5851                 rc = -ENODEV;
5852                 goto test_nvram_done;
5853         }
5854
5855         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5856                 goto test_nvram_done;
5857
5858         csum = ether_crc_le(0x100, data);
5859         if (csum != CRC32_RESIDUAL) {
5860                 rc = -ENODEV;
5861                 goto test_nvram_done;
5862         }
5863
5864         csum = ether_crc_le(0x100, data + 0x100);
5865         if (csum != CRC32_RESIDUAL) {
5866                 rc = -ENODEV;
5867         }
5868
5869 test_nvram_done:
5870         return rc;
5871 }
5872
5873 static int
5874 bnx2_test_link(struct bnx2 *bp)
5875 {
5876         u32 bmsr;
5877
5878         if (!netif_running(bp->dev))
5879                 return -ENODEV;
5880
5881         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5882                 if (bp->link_up)
5883                         return 0;
5884                 return -ENODEV;
5885         }
5886         spin_lock_bh(&bp->phy_lock);
5887         bnx2_enable_bmsr1(bp);
5888         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5889         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5890         bnx2_disable_bmsr1(bp);
5891         spin_unlock_bh(&bp->phy_lock);
5892
5893         if (bmsr & BMSR_LSTATUS) {
5894                 return 0;
5895         }
5896         return -ENODEV;
5897 }
5898
5899 static int
5900 bnx2_test_intr(struct bnx2 *bp)
5901 {
5902         int i;
5903         u16 status_idx;
5904
5905         if (!netif_running(bp->dev))
5906                 return -ENODEV;
5907
5908         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5909
5910         /* This register is not touched during run-time. */
5911         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5912         REG_RD(bp, BNX2_HC_COMMAND);
5913
5914         for (i = 0; i < 10; i++) {
5915                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5916                         status_idx) {
5917
5918                         break;
5919                 }
5920
5921                 msleep_interruptible(10);
5922         }
5923         if (i < 10)
5924                 return 0;
5925
5926         return -ENODEV;
5927 }
5928
/* Determining link for parallel detection. */
/* Returns 1 when the 5706 SerDes PHY appears to have a non-autoneg
 * link partner (signal detected, AN debug clean, not receiving CONFIG
 * code words), 0 otherwise.  Several status registers are read twice;
 * presumably the first read returns latched state — behavior inherited
 * from the original sequence, do not reorder.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection explicitly disabled for this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal on the wire -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Out of sync or invalid RUDI -> not a usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5960
/* Periodic link maintenance for the 5706 SerDes PHY, run from
 * bnx2_timer().  While link is down with autoneg enabled it attempts
 * parallel detection (forcing 1G full duplex when a non-autoneg
 * partner is seen); once the partner starts autonegotiating again it
 * re-enables autoneg.  Uses plain spin_lock — timer (softirq) context.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently (re)started; give it a few timer
		 * ticks before interfering, and skip the link check. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner may not be autonegotiating: if the wire
			 * looks alive, force 1G full duplex (parallel
			 * detection). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection; watch shadow reg
		 * 0x15 (selected through 0x17) for the partner resuming
		 * autoneg — bit 0x20 appears to signal that. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG — first read may be latched. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* We think link is up but the PHY lost sync: force
			 * it down once, then let bnx2_set_link() resolve. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6022
/* Periodic link maintenance for the 5708 SerDes PHY, run from
 * bnx2_timer().  While link is down with autoneg enabled it alternates
 * between forced 2.5G and autoneg so either kind of partner can be
 * found.  Uses plain spin_lock — timer (softirq) context.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remote PHY is managed through firmware; nothing to do here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg recently restarted; wait it out. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found nothing: try forced 2.5G with a
			 * shorter timer interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G found nothing: back to autoneg and
			 * hold off for the next two timer ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6055
/* Periodic driver timer (bp->timer).  Sends the firmware heartbeat,
 * refreshes the firmware RX drop counter, applies stats workarounds,
 * and runs the SerDes link state machines.  Always re-arms itself
 * while the interface is running.
 * @data: the struct bnx2 pointer, cast to unsigned long.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* A reset is in progress (intr_sem raised); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (not one-shot) can miss events; check for that. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* This counter lives in firmware space, not the DMA stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6091
6092 static int
6093 bnx2_request_irq(struct bnx2 *bp)
6094 {
6095         unsigned long flags;
6096         struct bnx2_irq *irq;
6097         int rc = 0, i;
6098
6099         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6100                 flags = 0;
6101         else
6102                 flags = IRQF_SHARED;
6103
6104         for (i = 0; i < bp->irq_nvecs; i++) {
6105                 irq = &bp->irq_tbl[i];
6106                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6107                                  &bp->bnx2_napi[i]);
6108                 if (rc)
6109                         break;
6110                 irq->requested = 1;
6111         }
6112         return rc;
6113 }
6114
6115 static void
6116 bnx2_free_irq(struct bnx2 *bp)
6117 {
6118         struct bnx2_irq *irq;
6119         int i;
6120
6121         for (i = 0; i < bp->irq_nvecs; i++) {
6122                 irq = &bp->irq_tbl[i];
6123                 if (irq->requested)
6124                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6125                 irq->requested = 0;
6126         }
6127         if (bp->flags & BNX2_FLAG_USING_MSI)
6128                 pci_disable_msi(bp->pdev);
6129         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6130                 pci_disable_msix(bp->pdev);
6131
6132         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6133 }
6134
/* Try to put the chip into MSI-X mode with @msix_vecs vectors.  On
 * success, sets BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI and
 * fills bp->irq_tbl; on failure, silently leaves the INTx defaults
 * set up by bnx2_setup_int_mode().
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All-or-nothing: any non-zero return (error, or only fewer
	 * vectors available) keeps us in INTx mode. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		/* e.g. "eth0-0", "eth0-1", ... for /proc/interrupts */
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6169
6170 static void
6171 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6172 {
6173         int cpus = num_online_cpus();
6174         int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6175
6176         bp->irq_tbl[0].handler = bnx2_interrupt;
6177         strcpy(bp->irq_tbl[0].name, bp->dev->name);
6178         bp->irq_nvecs = 1;
6179         bp->irq_tbl[0].vector = bp->pdev->irq;
6180
6181         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6182                 bnx2_enable_msix(bp, msix_vecs);
6183
6184         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6185             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6186                 if (pci_enable_msi(bp->pdev) == 0) {
6187                         bp->flags |= BNX2_FLAG_USING_MSI;
6188                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6189                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6190                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6191                         } else
6192                                 bp->irq_tbl[0].handler = bnx2_msi;
6193
6194                         bp->irq_tbl[0].vector = bp->pdev->irq;
6195                 }
6196         }
6197
6198         bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6199         bp->dev->real_num_tx_queues = bp->num_tx_rings;
6200
6201         bp->num_rx_rings = bp->irq_nvecs;
6202 }
6203
/* Called with rtnl_lock */
/* ndo_open: power up the chip, pick an interrupt mode, allocate rings,
 * request IRQs, and bring up the NIC.  If MSI is selected but fails
 * the self-test, falls back to INTx and re-initializes.  Returns 0 or
 * a negative errno after unwinding everything set up here.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Interrupt mode decides ring counts, so it comes before
	 * NAPI setup and memory allocation. */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces the INTx path. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was armed above; stop it before
				 * unwinding. */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything set up above. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6279
6280 static void
6281 bnx2_reset_task(struct work_struct *work)
6282 {
6283         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6284
6285         rtnl_lock();
6286         if (!netif_running(bp->dev)) {
6287                 rtnl_unlock();
6288                 return;
6289         }
6290
6291         bnx2_netif_stop(bp, true);
6292
6293         bnx2_init_nic(bp, 1);
6294
6295         atomic_set(&bp->intr_sem, 1);
6296         bnx2_netif_start(bp, true);
6297         rtnl_unlock();
6298 }
6299
/* Dump key chip and firmware registers to the log to help diagnose a
 * TX timeout; called from bnx2_tx_timeout() before the reset is
 * scheduled.  Log order and text are part of the bug-report format.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* Firmware CPU state is reached via indirect register access. */
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6318
6319 static void
6320 bnx2_tx_timeout(struct net_device *dev)
6321 {
6322         struct bnx2 *bp = netdev_priv(dev);
6323
6324         bnx2_dump_state(bp);
6325
6326         /* This allows the netif to be shutdown gracefully before resetting */
6327         schedule_work(&bp->reset_task);
6328 }
6329
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install the new VLAN group.  If the interface is running, traffic is
 * stopped around the switch, the RX mode is refreshed, and firmware is
 * told to keep VLAN tags when the chip supports it.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);
	int running = netif_running(dev);

	if (running)
		bnx2_netif_stop(bp, false);

	bp->vlgrp = vlgrp;

	if (running) {
		bnx2_set_rx_mode(dev);
		if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
			bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE,
				     0, 1);

		bnx2_netif_start(bp, false);
	}
}
#endif
6352
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb (head + page fragments) and post one TX
 * buffer descriptor per piece, encoding checksum/VLAN/LSO flags in the
 * first BD.  On a fragment mapping failure, everything mapped so far
 * is unwound and the packet is dropped (NETDEV_TX_OK).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue should have been stopped before the ring could
	 * fill; reaching this means the flow-control accounting broke. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		/* VLAN tag occupies the upper 16 bits of the flags word. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		/* LSO: encode the header layout for the chip. */
		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCP header offset relative to a standard IPv6
			 * header; non-zero offsets (extension headers) are
			 * spread across several flag/mss bit fields. */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				/* Extra IP/TCP option length in 32-bit words. */
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		/* Nothing mapped yet; simply drop the packet. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	/* First BD describes the linear part of the skb. */
	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(bp->pdev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* The last BD in the chain carries the END flag. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping: bnx2_tx_int() may have freed
		 * descriptors in the meantime. */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6519
/* Called with rtnl_lock */
/* ndo_stop: tear down in the reverse order of bnx2_open() — stop the
 * reset worker and interrupts first, then NAPI and the timer, shut
 * down the chip, and finally release IRQs and memory before dropping
 * to D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset work races with the teardown below. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Power down the chip while the interface is closed. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6540
6541 static void
6542 bnx2_save_stats(struct bnx2 *bp)
6543 {
6544         u32 *hw_stats = (u32 *) bp->stats_blk;
6545         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6546         int i;
6547
6548         /* The 1st 10 counters are 64-bit counters */
6549         for (i = 0; i < 20; i += 2) {
6550                 u32 hi;
6551                 u64 lo;
6552
6553                 hi = temp_stats[i] + hw_stats[i];
6554                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6555                 if (lo > 0xffffffff)
6556                         hi++;
6557                 temp_stats[i] = hi;
6558                 temp_stats[i + 1] = lo & 0xffffffff;
6559         }
6560
6561         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6562                 temp_stats[i] += hw_stats[i];
6563 }
6564
/* Combine a named counter from the live stats block with the snapshot
 * preserved in temp_stats_blk (see bnx2_save_stats()).  64-bit
 * counters use both the _hi and _lo halves when unsigned long is 64
 * bits; on 32-bit hosts only the _lo half can be reported.
 */
#define GET_64BIT_NET_STATS64(ctr)                              \
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
	(unsigned long) (ctr##_lo)

#define GET_64BIT_NET_STATS32(ctr)                              \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6585
/* ndo_get_stats: translate the chip's statistics block (plus the
 * counters saved across resets in temp_stats_blk) into the generic
 * net_device_stats layout.  Returns the (unmodified) dev->stats until
 * the stats block has been allocated.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	/* Note: "Overrsize" spelling matches the stats block field name. */
	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* The carrier sense counter is apparently not usable on the
	 * 5706 and 5708 A0 — report 0 on those chips. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* stat_FwRxDrop is refreshed from firmware in bnx2_timer(). */
	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6659
6660 /* All ethtool functions called with rtnl_lock */
6661
6662 static int
6663 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6664 {
6665         struct bnx2 *bp = netdev_priv(dev);
6666         int support_serdes = 0, support_copper = 0;
6667
6668         cmd->supported = SUPPORTED_Autoneg;
6669         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6670                 support_serdes = 1;
6671                 support_copper = 1;
6672         } else if (bp->phy_port == PORT_FIBRE)
6673                 support_serdes = 1;
6674         else
6675                 support_copper = 1;
6676
6677         if (support_serdes) {
6678                 cmd->supported |= SUPPORTED_1000baseT_Full |
6679                         SUPPORTED_FIBRE;
6680                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6681                         cmd->supported |= SUPPORTED_2500baseX_Full;
6682
6683         }
6684         if (support_copper) {
6685                 cmd->supported |= SUPPORTED_10baseT_Half |
6686                         SUPPORTED_10baseT_Full |
6687                         SUPPORTED_100baseT_Half |
6688                         SUPPORTED_100baseT_Full |
6689                         SUPPORTED_1000baseT_Full |
6690                         SUPPORTED_TP;
6691
6692         }
6693
6694         spin_lock_bh(&bp->phy_lock);
6695         cmd->port = bp->phy_port;
6696         cmd->advertising = bp->advertising;
6697
6698         if (bp->autoneg & AUTONEG_SPEED) {
6699                 cmd->autoneg = AUTONEG_ENABLE;
6700         }
6701         else {
6702                 cmd->autoneg = AUTONEG_DISABLE;
6703         }
6704
6705         if (netif_carrier_ok(dev)) {
6706                 cmd->speed = bp->line_speed;
6707                 cmd->duplex = bp->duplex;
6708         }
6709         else {
6710                 cmd->speed = -1;
6711                 cmd->duplex = -1;
6712         }
6713         spin_unlock_bh(&bp->phy_lock);
6714
6715         cmd->transceiver = XCVR_INTERNAL;
6716         cmd->phy_address = bp->phy_addr;
6717
6718         return 0;
6719 }
6720
/* ethtool set_settings: apply speed/duplex/autoneg configuration.
 * Called with rtnl_lock held; phy_lock is held across validation and
 * commit.  Returns 0 on success or -EINVAL for invalid combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching to a different port is only possible on remote-PHY
	 * capable devices. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict advertised modes to the selected medium; an
		 * empty mask falls back to advertising everything. */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: validate the speed/duplex combination. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre accepts only 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed; commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6798
6799 static void
6800 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6801 {
6802         struct bnx2 *bp = netdev_priv(dev);
6803
6804         strcpy(info->driver, DRV_MODULE_NAME);
6805         strcpy(info->version, DRV_MODULE_VERSION);
6806         strcpy(info->bus_info, pci_name(bp->pdev));
6807         strcpy(info->fw_version, bp->fw_version);
6808 }
6809
6810 #define BNX2_REGDUMP_LEN                (32 * 1024)
6811
/* ethtool get_regs_len: size in bytes of the register dump buffer
 * filled by bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6817
6818 static void
6819 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6820 {
6821         u32 *p = _p, i, offset;
6822         u8 *orig_p = _p;
6823         struct bnx2 *bp = netdev_priv(dev);
6824         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6825                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6826                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6827                                  0x1040, 0x1048, 0x1080, 0x10a4,
6828                                  0x1400, 0x1490, 0x1498, 0x14f0,
6829                                  0x1500, 0x155c, 0x1580, 0x15dc,
6830                                  0x1600, 0x1658, 0x1680, 0x16d8,
6831                                  0x1800, 0x1820, 0x1840, 0x1854,
6832                                  0x1880, 0x1894, 0x1900, 0x1984,
6833                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6834                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6835                                  0x2000, 0x2030, 0x23c0, 0x2400,
6836                                  0x2800, 0x2820, 0x2830, 0x2850,
6837                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6838                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6839                                  0x4080, 0x4090, 0x43c0, 0x4458,
6840                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6841                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6842                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6843                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6844                                  0x6800, 0x6848, 0x684c, 0x6860,
6845                                  0x6888, 0x6910, 0x8000 };
6846
6847         regs->version = 0;
6848
6849         memset(p, 0, BNX2_REGDUMP_LEN);
6850
6851         if (!netif_running(bp->dev))
6852                 return;
6853
6854         i = 0;
6855         offset = reg_boundaries[0];
6856         p += offset;
6857         while (offset < BNX2_REGDUMP_LEN) {
6858                 *p++ = REG_RD(bp, offset);
6859                 offset += 4;
6860                 if (offset == reg_boundaries[i + 1]) {
6861                         offset = reg_boundaries[i + 2];
6862                         p = (u32 *) (orig_p + offset);
6863                         i += 2;
6864                 }
6865         }
6866 }
6867
6868 static void
6869 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6870 {
6871         struct bnx2 *bp = netdev_priv(dev);
6872
6873         if (bp->flags & BNX2_FLAG_NO_WOL) {
6874                 wol->supported = 0;
6875                 wol->wolopts = 0;
6876         }
6877         else {
6878                 wol->supported = WAKE_MAGIC;
6879                 if (bp->wol)
6880                         wol->wolopts = WAKE_MAGIC;
6881                 else
6882                         wol->wolopts = 0;
6883         }
6884         memset(&wol->sopass, 0, sizeof(wol->sopass));
6885 }
6886
6887 static int
6888 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6889 {
6890         struct bnx2 *bp = netdev_priv(dev);
6891
6892         if (wol->wolopts & ~WAKE_MAGIC)
6893                 return -EINVAL;
6894
6895         if (wol->wolopts & WAKE_MAGIC) {
6896                 if (bp->flags & BNX2_FLAG_NO_WOL)
6897                         return -EINVAL;
6898
6899                 bp->wol = 1;
6900         }
6901         else {
6902                 bp->wol = 0;
6903         }
6904         return 0;
6905 }
6906
/* ethtool nway_reset: restart link autonegotiation.  Requires the
 * device to be up and autoneg to be enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices renegotiate through the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping so the link transition can
		 * be observed by the peer. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6952
/* ethtool get_link: report the driver's cached link-up state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
6960
6961 static int
6962 bnx2_get_eeprom_len(struct net_device *dev)
6963 {
6964         struct bnx2 *bp = netdev_priv(dev);
6965
6966         if (bp->flash_info == NULL)
6967                 return 0;
6968
6969         return (int) bp->flash_size;
6970 }
6971
6972 static int
6973 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6974                 u8 *eebuf)
6975 {
6976         struct bnx2 *bp = netdev_priv(dev);
6977         int rc;
6978
6979         if (!netif_running(dev))
6980                 return -EAGAIN;
6981
6982         /* parameters already validated in ethtool_get_eeprom */
6983
6984         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6985
6986         return rc;
6987 }
6988
6989 static int
6990 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6991                 u8 *eebuf)
6992 {
6993         struct bnx2 *bp = netdev_priv(dev);
6994         int rc;
6995
6996         if (!netif_running(dev))
6997                 return -EAGAIN;
6998
6999         /* parameters already validated in ethtool_set_eeprom */
7000
7001         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7002
7003         return rc;
7004 }
7005
7006 static int
7007 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7008 {
7009         struct bnx2 *bp = netdev_priv(dev);
7010
7011         memset(coal, 0, sizeof(struct ethtool_coalesce));
7012
7013         coal->rx_coalesce_usecs = bp->rx_ticks;
7014         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7015         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7016         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7017
7018         coal->tx_coalesce_usecs = bp->tx_ticks;
7019         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7020         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7021         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7022
7023         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7024
7025         return 0;
7026 }
7027
7028 static int
7029 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7030 {
7031         struct bnx2 *bp = netdev_priv(dev);
7032
7033         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7034         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7035
7036         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7037         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7038
7039         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7040         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7041
7042         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7043         if (bp->rx_quick_cons_trip_int > 0xff)
7044                 bp->rx_quick_cons_trip_int = 0xff;
7045
7046         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7047         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7048
7049         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7050         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7051
7052         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7053         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7054
7055         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7056         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7057                 0xff;
7058
7059         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7060         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7061                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7062                         bp->stats_ticks = USEC_PER_SEC;
7063         }
7064         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7065                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7066         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7067
7068         if (netif_running(bp->dev)) {
7069                 bnx2_netif_stop(bp, true);
7070                 bnx2_init_nic(bp, 0);
7071                 bnx2_netif_start(bp, true);
7072         }
7073
7074         return 0;
7075 }
7076
7077 static void
7078 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7079 {
7080         struct bnx2 *bp = netdev_priv(dev);
7081
7082         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7083         ering->rx_mini_max_pending = 0;
7084         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7085
7086         ering->rx_pending = bp->rx_ring_size;
7087         ering->rx_mini_pending = 0;
7088         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7089
7090         ering->tx_max_pending = MAX_TX_DESC_CNT;
7091         ering->tx_pending = bp->tx_ring_size;
7092 }
7093
/* Resize the rx/tx rings.  If the interface is up, the chip is reset
 * and reinitialized with the new ring sizes; on allocation or init
 * failure the device is closed.  Returns 0 or a negative errno.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable napi so dev_close() can tear the
			 * device down cleanly, then give up. */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7133
7134 static int
7135 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7136 {
7137         struct bnx2 *bp = netdev_priv(dev);
7138         int rc;
7139
7140         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7141                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7142                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7143
7144                 return -EINVAL;
7145         }
7146         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7147         return rc;
7148 }
7149
7150 static void
7151 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7152 {
7153         struct bnx2 *bp = netdev_priv(dev);
7154
7155         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7156         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7157         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7158 }
7159
7160 static int
7161 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7162 {
7163         struct bnx2 *bp = netdev_priv(dev);
7164
7165         bp->req_flow_ctrl = 0;
7166         if (epause->rx_pause)
7167                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7168         if (epause->tx_pause)
7169                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7170
7171         if (epause->autoneg) {
7172                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7173         }
7174         else {
7175                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7176         }
7177
7178         if (netif_running(dev)) {
7179                 spin_lock_bh(&bp->phy_lock);
7180                 bnx2_setup_phy(bp, bp->phy_port);
7181                 spin_unlock_bh(&bp->phy_lock);
7182         }
7183
7184         return 0;
7185 }
7186
/* ethtool get_rx_csum: report whether rx checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
7194
/* ethtool set_rx_csum: enable/disable rx checksum offload.  Only the
 * flag is stored here; the rx path consults bp->rx_csum per packet.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
7203
7204 static int
7205 bnx2_set_tso(struct net_device *dev, u32 data)
7206 {
7207         struct bnx2 *bp = netdev_priv(dev);
7208
7209         if (data) {
7210                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7211                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7212                         dev->features |= NETIF_F_TSO6;
7213         } else
7214                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7215                                    NETIF_F_TSO_ECN);
7216         return 0;
7217 }
7218
/* ethtool statistics names.  The entry order must match
 * bnx2_stats_offset_arr and the per-chip stats length tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7270
/* Number of ethtool statistics entries.  Use the kernel's ARRAY_SIZE()
 * instead of the hand-rolled sizeof division; it also type-checks that
 * the operand really is an array.
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Convert a statistics_block field name into its 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7275
/* 32-bit word offsets into the hardware statistics block, one entry per
 * name in bnx2_stats_str_arr (same order).  For 64-bit counters the
 * offset points at the _hi word; the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7325
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter byte widths for 5706 A0-A2 and 5708 A0 chips: 8 = 64-bit
 * counter, 4 = 32-bit counter, 0 = counter skipped (report 0).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7336
/* Per-counter byte widths for later chips: 8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = counter skipped (here only rx_error_bytes).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7344
#define BNX2_NUM_TESTS 6

/* Self-test names reported via ethtool; indices match the buf[] slots
 * filled in bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7357
7358 static int
7359 bnx2_get_sset_count(struct net_device *dev, int sset)
7360 {
7361         switch (sset) {
7362         case ETH_SS_TEST:
7363                 return BNX2_NUM_TESTS;
7364         case ETH_SS_STATS:
7365                 return BNX2_NUM_STATS;
7366         default:
7367                 return -EOPNOTSUPP;
7368         }
7369 }
7370
/* ethtool self-test.  Offline tests (register, memory, loopback) reset
 * the chip into diagnostic mode; online tests (nvram, interrupt, link)
 * run against the live device.  A non-zero buf[i] marks test i failed,
 * and etest->flags gets ETH_TEST_FL_FAILED on any failure.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take the NIC offline and reset into diagnostic mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip down if the
		 * device was closed while testing. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Drop back to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7429
7430 static void
7431 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7432 {
7433         switch (stringset) {
7434         case ETH_SS_STATS:
7435                 memcpy(buf, bnx2_stats_str_arr,
7436                         sizeof(bnx2_stats_str_arr));
7437                 break;
7438         case ETH_SS_TEST:
7439                 memcpy(buf, bnx2_tests_str_arr,
7440                         sizeof(bnx2_tests_str_arr));
7441                 break;
7442         }
7443 }
7444
/* ethtool get_ethtool_stats: fill buf[] with BNX2_NUM_STATS counters.
 * Each value is the sum of the live hardware stats block and the saved
 * copy in temp_stats_blk (preserved across chip resets).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions skip some counters because of errata;
	 * select the matching per-counter width table. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset points at the high word, the low
		 * word follows it. */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7491
7492 static int
7493 bnx2_phys_id(struct net_device *dev, u32 data)
7494 {
7495         struct bnx2 *bp = netdev_priv(dev);
7496         int i;
7497         u32 save;
7498
7499         bnx2_set_power_state(bp, PCI_D0);
7500
7501         if (data == 0)
7502                 data = 2;
7503
7504         save = REG_RD(bp, BNX2_MISC_CFG);
7505         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7506
7507         for (i = 0; i < (data * 2); i++) {
7508                 if ((i % 2) == 0) {
7509                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7510                 }
7511                 else {
7512                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7513                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7514                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7515                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7516                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7517                                 BNX2_EMAC_LED_TRAFFIC);
7518                 }
7519                 msleep_interruptible(500);
7520                 if (signal_pending(current))
7521                         break;
7522         }
7523         REG_WR(bp, BNX2_EMAC_LED, 0);
7524         REG_WR(bp, BNX2_MISC_CFG, save);
7525
7526         if (!netif_running(dev))
7527                 bnx2_set_power_state(bp, PCI_D3hot);
7528
7529         return 0;
7530 }
7531
7532 static int
7533 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7534 {
7535         struct bnx2 *bp = netdev_priv(dev);
7536
7537         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7538                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7539         else
7540                 return (ethtool_op_set_tx_csum(dev, data));
7541 }
7542
/* ethtool entry points for this driver; hooked up in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7573
7574 /* Called with rtnl_lock */
7575 static int
7576 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7577 {
7578         struct mii_ioctl_data *data = if_mii(ifr);
7579         struct bnx2 *bp = netdev_priv(dev);
7580         int err;
7581
7582         switch(cmd) {
7583         case SIOCGMIIPHY:
7584                 data->phy_id = bp->phy_addr;
7585
7586                 /* fallthru */
7587         case SIOCGMIIREG: {
7588                 u32 mii_regval;
7589
7590                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7591                         return -EOPNOTSUPP;
7592
7593                 if (!netif_running(dev))
7594                         return -EAGAIN;
7595
7596                 spin_lock_bh(&bp->phy_lock);
7597                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7598                 spin_unlock_bh(&bp->phy_lock);
7599
7600                 data->val_out = mii_regval;
7601
7602                 return err;
7603         }
7604
7605         case SIOCSMIIREG:
7606                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7607                         return -EOPNOTSUPP;
7608
7609                 if (!netif_running(dev))
7610                         return -EAGAIN;
7611
7612                 spin_lock_bh(&bp->phy_lock);
7613                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7614                 spin_unlock_bh(&bp->phy_lock);
7615
7616                 return err;
7617
7618         default:
7619                 /* do nothing */
7620                 break;
7621         }
7622         return -EOPNOTSUPP;
7623 }
7624
7625 /* Called with rtnl_lock */
7626 static int
7627 bnx2_change_mac_addr(struct net_device *dev, void *p)
7628 {
7629         struct sockaddr *addr = p;
7630         struct bnx2 *bp = netdev_priv(dev);
7631
7632         if (!is_valid_ether_addr(addr->sa_data))
7633                 return -EINVAL;
7634
7635         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7636         if (netif_running(dev))
7637                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7638
7639         return 0;
7640 }
7641
7642 /* Called with rtnl_lock */
7643 static int
7644 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7645 {
7646         struct bnx2 *bp = netdev_priv(dev);
7647
7648         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7649                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7650                 return -EINVAL;
7651
7652         dev->mtu = new_mtu;
7653         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7654 }
7655
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every vector's interrupt handler with its IRQ
 * masked, so console/netdump traffic can make progress.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		struct bnx2_irq *irqp = &bp->irq_tbl[vec];

		disable_irq(irqp->vector);
		irqp->handler(irqp->vector, &bp->bnx2_napi[vec]);
		enable_irq(irqp->vector);
	}
}
#endif
7672
7673 static void __devinit
7674 bnx2_get_5709_media(struct bnx2 *bp)
7675 {
7676         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7677         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7678         u32 strap;
7679
7680         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7681                 return;
7682         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7683                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7684                 return;
7685         }
7686
7687         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7688                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7689         else
7690                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7691
7692         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7693                 switch (strap) {
7694                 case 0x4:
7695                 case 0x5:
7696                 case 0x6:
7697                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7698                         return;
7699                 }
7700         } else {
7701                 switch (strap) {
7702                 case 0x1:
7703                 case 0x2:
7704                 case 0x4:
7705                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7706                         return;
7707                 }
7708         }
7709 }
7710
/* Detect the bus type (PCI vs. PCI-X), bus clock speed and bus width
 * from the chip's PCICFG status registers, recording the results in
 * bp->flags and bp->bus_speed_mhz.  Used for the probe banner and for
 * bus-specific workarounds.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* On PCI-X the detected clock encodes the bus speed. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: only 33 vs. 66 MHz, from the M66EN pin. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7762
/* Read the PCI VPD area out of NVRAM and, if it carries the expected
 * manufacturer id ("1028"), copy the vendor-specific version keyword
 * into the front of bp->fw_version.  Best effort: any parse failure
 * just leaves fw_version untouched.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* First half holds the byte-swapped result, second half the raw
	 * NVRAM image; hence 2 * BNX2_VPD_LEN = 256 bytes.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores each 32-bit word byte-swapped; undo that while
	 * copying from the upper half of the buffer into the lower half.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bounds-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only accept the version keyword when the manufacturer id
	 * matches "1028".
	 */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Trailing space separates this from the bootcode version that
	 * bnx2_init_board() appends afterwards.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7830
7831 static int __devinit
7832 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7833 {
7834         struct bnx2 *bp;
7835         unsigned long mem_len;
7836         int rc, i, j;
7837         u32 reg;
7838         u64 dma_mask, persist_dma_mask;
7839
7840         SET_NETDEV_DEV(dev, &pdev->dev);
7841         bp = netdev_priv(dev);
7842
7843         bp->flags = 0;
7844         bp->phy_flags = 0;
7845
7846         bp->temp_stats_blk =
7847                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7848
7849         if (bp->temp_stats_blk == NULL) {
7850                 rc = -ENOMEM;
7851                 goto err_out;
7852         }
7853
7854         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7855         rc = pci_enable_device(pdev);
7856         if (rc) {
7857                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7858                 goto err_out;
7859         }
7860
7861         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7862                 dev_err(&pdev->dev,
7863                         "Cannot find PCI device base address, aborting\n");
7864                 rc = -ENODEV;
7865                 goto err_out_disable;
7866         }
7867
7868         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7869         if (rc) {
7870                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7871                 goto err_out_disable;
7872         }
7873
7874         pci_set_master(pdev);
7875         pci_save_state(pdev);
7876
7877         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7878         if (bp->pm_cap == 0) {
7879                 dev_err(&pdev->dev,
7880                         "Cannot find power management capability, aborting\n");
7881                 rc = -EIO;
7882                 goto err_out_release;
7883         }
7884
7885         bp->dev = dev;
7886         bp->pdev = pdev;
7887
7888         spin_lock_init(&bp->phy_lock);
7889         spin_lock_init(&bp->indirect_lock);
7890 #ifdef BCM_CNIC
7891         mutex_init(&bp->cnic_lock);
7892 #endif
7893         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7894
7895         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7896         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7897         dev->mem_end = dev->mem_start + mem_len;
7898         dev->irq = pdev->irq;
7899
7900         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7901
7902         if (!bp->regview) {
7903                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7904                 rc = -ENOMEM;
7905                 goto err_out_release;
7906         }
7907
7908         /* Configure byte swap and enable write to the reg_window registers.
7909          * Rely on CPU to do target byte swapping on big endian systems
7910          * The chip's target access swapping will not swap all accesses
7911          */
7912         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7913                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7914                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7915
7916         bnx2_set_power_state(bp, PCI_D0);
7917
7918         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7919
7920         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7921                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7922                         dev_err(&pdev->dev,
7923                                 "Cannot find PCIE capability, aborting\n");
7924                         rc = -EIO;
7925                         goto err_out_unmap;
7926                 }
7927                 bp->flags |= BNX2_FLAG_PCIE;
7928                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7929                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7930         } else {
7931                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7932                 if (bp->pcix_cap == 0) {
7933                         dev_err(&pdev->dev,
7934                                 "Cannot find PCIX capability, aborting\n");
7935                         rc = -EIO;
7936                         goto err_out_unmap;
7937                 }
7938                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7939         }
7940
7941         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7942                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7943                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7944         }
7945
7946         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7947                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7948                         bp->flags |= BNX2_FLAG_MSI_CAP;
7949         }
7950
7951         /* 5708 cannot support DMA addresses > 40-bit.  */
7952         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7953                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7954         else
7955                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7956
7957         /* Configure DMA attributes. */
7958         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7959                 dev->features |= NETIF_F_HIGHDMA;
7960                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7961                 if (rc) {
7962                         dev_err(&pdev->dev,
7963                                 "pci_set_consistent_dma_mask failed, aborting\n");
7964                         goto err_out_unmap;
7965                 }
7966         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7967                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7968                 goto err_out_unmap;
7969         }
7970
7971         if (!(bp->flags & BNX2_FLAG_PCIE))
7972                 bnx2_get_pci_speed(bp);
7973
7974         /* 5706A0 may falsely detect SERR and PERR. */
7975         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7976                 reg = REG_RD(bp, PCI_COMMAND);
7977                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7978                 REG_WR(bp, PCI_COMMAND, reg);
7979         }
7980         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7981                 !(bp->flags & BNX2_FLAG_PCIX)) {
7982
7983                 dev_err(&pdev->dev,
7984                         "5706 A1 can only be used in a PCIX bus, aborting\n");
7985                 goto err_out_unmap;
7986         }
7987
7988         bnx2_init_nvram(bp);
7989
7990         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7991
7992         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7993             BNX2_SHM_HDR_SIGNATURE_SIG) {
7994                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7995
7996                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7997         } else
7998                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7999
8000         /* Get the permanent MAC address.  First we need to make sure the
8001          * firmware is actually running.
8002          */
8003         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8004
8005         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8006             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8007                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8008                 rc = -ENODEV;
8009                 goto err_out_unmap;
8010         }
8011
8012         bnx2_read_vpd_fw_ver(bp);
8013
8014         j = strlen(bp->fw_version);
8015         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8016         for (i = 0; i < 3 && j < 24; i++) {
8017                 u8 num, k, skip0;
8018
8019                 if (i == 0) {
8020                         bp->fw_version[j++] = 'b';
8021                         bp->fw_version[j++] = 'c';
8022                         bp->fw_version[j++] = ' ';
8023                 }
8024                 num = (u8) (reg >> (24 - (i * 8)));
8025                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8026                         if (num >= k || !skip0 || k == 1) {
8027                                 bp->fw_version[j++] = (num / k) + '0';
8028                                 skip0 = 0;
8029                         }
8030                 }
8031                 if (i != 2)
8032                         bp->fw_version[j++] = '.';
8033         }
8034         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8035         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8036                 bp->wol = 1;
8037
8038         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8039                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8040
8041                 for (i = 0; i < 30; i++) {
8042                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8043                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8044                                 break;
8045                         msleep(10);
8046                 }
8047         }
8048         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8049         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8050         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8051             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8052                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8053
8054                 if (j < 32)
8055                         bp->fw_version[j++] = ' ';
8056                 for (i = 0; i < 3 && j < 28; i++) {
8057                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8058                         reg = swab32(reg);
8059                         memcpy(&bp->fw_version[j], &reg, 4);
8060                         j += 4;
8061                 }
8062         }
8063
8064         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8065         bp->mac_addr[0] = (u8) (reg >> 8);
8066         bp->mac_addr[1] = (u8) reg;
8067
8068         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8069         bp->mac_addr[2] = (u8) (reg >> 24);
8070         bp->mac_addr[3] = (u8) (reg >> 16);
8071         bp->mac_addr[4] = (u8) (reg >> 8);
8072         bp->mac_addr[5] = (u8) reg;
8073
8074         bp->tx_ring_size = MAX_TX_DESC_CNT;
8075         bnx2_set_rx_ring_size(bp, 255);
8076
8077         bp->rx_csum = 1;
8078
8079         bp->tx_quick_cons_trip_int = 2;
8080         bp->tx_quick_cons_trip = 20;
8081         bp->tx_ticks_int = 18;
8082         bp->tx_ticks = 80;
8083
8084         bp->rx_quick_cons_trip_int = 2;
8085         bp->rx_quick_cons_trip = 12;
8086         bp->rx_ticks_int = 18;
8087         bp->rx_ticks = 18;
8088
8089         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8090
8091         bp->current_interval = BNX2_TIMER_INTERVAL;
8092
8093         bp->phy_addr = 1;
8094
8095         /* Disable WOL support if we are running on a SERDES chip. */
8096         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8097                 bnx2_get_5709_media(bp);
8098         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8099                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8100
8101         bp->phy_port = PORT_TP;
8102         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8103                 bp->phy_port = PORT_FIBRE;
8104                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8105                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8106                         bp->flags |= BNX2_FLAG_NO_WOL;
8107                         bp->wol = 0;
8108                 }
8109                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8110                         /* Don't do parallel detect on this board because of
8111                          * some board problems.  The link will not go down
8112                          * if we do parallel detect.
8113                          */
8114                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8115                             pdev->subsystem_device == 0x310c)
8116                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8117                 } else {
8118                         bp->phy_addr = 2;
8119                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8120                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8121                 }
8122         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8123                    CHIP_NUM(bp) == CHIP_NUM_5708)
8124                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8125         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8126                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8127                   CHIP_REV(bp) == CHIP_REV_Bx))
8128                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8129
8130         bnx2_init_fw_cap(bp);
8131
8132         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8133             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8134             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8135             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8136                 bp->flags |= BNX2_FLAG_NO_WOL;
8137                 bp->wol = 0;
8138         }
8139
8140         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8141                 bp->tx_quick_cons_trip_int =
8142                         bp->tx_quick_cons_trip;
8143                 bp->tx_ticks_int = bp->tx_ticks;
8144                 bp->rx_quick_cons_trip_int =
8145                         bp->rx_quick_cons_trip;
8146                 bp->rx_ticks_int = bp->rx_ticks;
8147                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8148                 bp->com_ticks_int = bp->com_ticks;
8149                 bp->cmd_ticks_int = bp->cmd_ticks;
8150         }
8151
8152         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8153          *
8154          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8155          * with byte enables disabled on the unused 32-bit word.  This is legal
8156          * but causes problems on the AMD 8132 which will eventually stop
8157          * responding after a while.
8158          *
8159          * AMD believes this incompatibility is unique to the 5706, and
8160          * prefers to locally disable MSI rather than globally disabling it.
8161          */
8162         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8163                 struct pci_dev *amd_8132 = NULL;
8164
8165                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8166                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8167                                                   amd_8132))) {
8168
8169                         if (amd_8132->revision >= 0x10 &&
8170                             amd_8132->revision <= 0x13) {
8171                                 disable_msi = 1;
8172                                 pci_dev_put(amd_8132);
8173                                 break;
8174                         }
8175                 }
8176         }
8177
8178         bnx2_set_default_link(bp);
8179         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8180
8181         init_timer(&bp->timer);
8182         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8183         bp->timer.data = (unsigned long) bp;
8184         bp->timer.function = bnx2_timer;
8185
8186         return 0;
8187
8188 err_out_unmap:
8189         if (bp->regview) {
8190                 iounmap(bp->regview);
8191                 bp->regview = NULL;
8192         }
8193
8194 err_out_release:
8195         pci_release_regions(pdev);
8196
8197 err_out_disable:
8198         pci_disable_device(pdev);
8199         pci_set_drvdata(pdev, NULL);
8200
8201 err_out:
8202         return rc;
8203 }
8204
8205 static char * __devinit
8206 bnx2_bus_string(struct bnx2 *bp, char *str)
8207 {
8208         char *s = str;
8209
8210         if (bp->flags & BNX2_FLAG_PCIE) {
8211                 s += sprintf(s, "PCI Express");
8212         } else {
8213                 s += sprintf(s, "PCI");
8214                 if (bp->flags & BNX2_FLAG_PCIX)
8215                         s += sprintf(s, "-X");
8216                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8217                         s += sprintf(s, " 32-bit");
8218                 else
8219                         s += sprintf(s, " 64-bit");
8220                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8221         }
8222         return str;
8223 }
8224
8225 static void __devinit
8226 bnx2_init_napi(struct bnx2 *bp)
8227 {
8228         int i;
8229
8230         for (i = 0; i < bp->irq_nvecs; i++) {
8231                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8232                 int (*poll)(struct napi_struct *, int);
8233
8234                 if (i == 0)
8235                         poll = bnx2_poll;
8236                 else
8237                         poll = bnx2_poll_msix;
8238
8239                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8240                 bnapi->bp = bp;
8241         }
8242 }
8243
/* Network stack entry points; installed on the net_device in
 * bnx2_init_one().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8262
/* Add @flags to the device's VLAN feature set; a no-op when VLAN
 * support is not compiled in.
 */
static inline void vlan_features_add(struct net_device *dev,
				     unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8269
8270 static int __devinit
8271 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8272 {
8273         static int version_printed = 0;
8274         struct net_device *dev = NULL;
8275         struct bnx2 *bp;
8276         int rc;
8277         char str[40];
8278
8279         if (version_printed++ == 0)
8280                 pr_info("%s", version);
8281
8282         /* dev zeroed in init_etherdev */
8283         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8284
8285         if (!dev)
8286                 return -ENOMEM;
8287
8288         rc = bnx2_init_board(pdev, dev);
8289         if (rc < 0) {
8290                 free_netdev(dev);
8291                 return rc;
8292         }
8293
8294         dev->netdev_ops = &bnx2_netdev_ops;
8295         dev->watchdog_timeo = TX_TIMEOUT;
8296         dev->ethtool_ops = &bnx2_ethtool_ops;
8297
8298         bp = netdev_priv(dev);
8299
8300         pci_set_drvdata(pdev, dev);
8301
8302         rc = bnx2_request_firmware(bp);
8303         if (rc)
8304                 goto error;
8305
8306         memcpy(dev->dev_addr, bp->mac_addr, 6);
8307         memcpy(dev->perm_addr, bp->mac_addr, 6);
8308
8309         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
8310         vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8311         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8312                 dev->features |= NETIF_F_IPV6_CSUM;
8313                 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8314         }
8315 #ifdef BCM_VLAN
8316         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8317 #endif
8318         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8319         vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8320         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8321                 dev->features |= NETIF_F_TSO6;
8322                 vlan_features_add(dev, NETIF_F_TSO6);
8323         }
8324         if ((rc = register_netdev(dev))) {
8325                 dev_err(&pdev->dev, "Cannot register net device\n");
8326                 goto error;
8327         }
8328
8329         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8330                     board_info[ent->driver_data].name,
8331                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8332                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8333                     bnx2_bus_string(bp, str),
8334                     dev->base_addr,
8335                     bp->pdev->irq, dev->dev_addr);
8336
8337         return 0;
8338
8339 error:
8340         if (bp->mips_firmware)
8341                 release_firmware(bp->mips_firmware);
8342         if (bp->rv2p_firmware)
8343                 release_firmware(bp->rv2p_firmware);
8344
8345         if (bp->regview)
8346                 iounmap(bp->regview);
8347         pci_release_regions(pdev);
8348         pci_disable_device(pdev);
8349         pci_set_drvdata(pdev, NULL);
8350         free_netdev(dev);
8351         return rc;
8352 }
8353
8354 static void __devexit
8355 bnx2_remove_one(struct pci_dev *pdev)
8356 {
8357         struct net_device *dev = pci_get_drvdata(pdev);
8358         struct bnx2 *bp = netdev_priv(dev);
8359
8360         flush_scheduled_work();
8361
8362         unregister_netdev(dev);
8363
8364         if (bp->mips_firmware)
8365                 release_firmware(bp->mips_firmware);
8366         if (bp->rv2p_firmware)
8367                 release_firmware(bp->rv2p_firmware);
8368
8369         if (bp->regview)
8370                 iounmap(bp->regview);
8371
8372         kfree(bp->temp_stats_blk);
8373
8374         free_netdev(dev);
8375         pci_release_regions(pdev);
8376         pci_disable_device(pdev);
8377         pci_set_drvdata(pdev, NULL);
8378 }
8379
/* PM suspend hook: quiesce the NIC and drop it to the requested PCI
 * power state.  The teardown order below (stop traffic, detach, kill
 * timer, shut down chip, free buffers) is deliberate — do not reorder.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure no reset task is still queued before stopping. */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8403
8404 static int
8405 bnx2_resume(struct pci_dev *pdev)
8406 {
8407         struct net_device *dev = pci_get_drvdata(pdev);
8408         struct bnx2 *bp = netdev_priv(dev);
8409
8410         pci_restore_state(pdev);
8411         if (!netif_running(dev))
8412                 return 0;
8413
8414         bnx2_set_power_state(bp, PCI_D0);
8415         netif_device_attach(dev);
8416         bnx2_init_nic(bp, 1);
8417         bnx2_netif_start(bp, true);
8418         return 0;
8419 }
8420
8421 /**
8422  * bnx2_io_error_detected - called when PCI error is detected
8423  * @pdev: Pointer to PCI device
8424  * @state: The current pci connection state
8425  *
8426  * This function is called after a PCI bus error affecting
8427  * this device has been detected.
8428  */
8429 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8430                                                pci_channel_state_t state)
8431 {
8432         struct net_device *dev = pci_get_drvdata(pdev);
8433         struct bnx2 *bp = netdev_priv(dev);
8434
8435         rtnl_lock();
8436         netif_device_detach(dev);
8437
8438         if (state == pci_channel_io_perm_failure) {
8439                 rtnl_unlock();
8440                 return PCI_ERS_RESULT_DISCONNECT;
8441         }
8442
8443         if (netif_running(dev)) {
8444                 bnx2_netif_stop(bp, true);
8445                 del_timer_sync(&bp->timer);
8446                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8447         }
8448
8449         pci_disable_device(pdev);
8450         rtnl_unlock();
8451
8452         /* Request a slot slot reset. */
8453         return PCI_ERS_RESULT_NEED_RESET;
8454 }
8455
8456 /**
8457  * bnx2_io_slot_reset - called after the pci bus has been reset.
8458  * @pdev: Pointer to PCI device
8459  *
8460  * Restart the card from scratch, as if from a cold-boot.
8461  */
8462 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8463 {
8464         struct net_device *dev = pci_get_drvdata(pdev);
8465         struct bnx2 *bp = netdev_priv(dev);
8466
8467         rtnl_lock();
8468         if (pci_enable_device(pdev)) {
8469                 dev_err(&pdev->dev,
8470                         "Cannot re-enable PCI device after reset\n");
8471                 rtnl_unlock();
8472                 return PCI_ERS_RESULT_DISCONNECT;
8473         }
8474         pci_set_master(pdev);
8475         pci_restore_state(pdev);
8476         pci_save_state(pdev);
8477
8478         if (netif_running(dev)) {
8479                 bnx2_set_power_state(bp, PCI_D0);
8480                 bnx2_init_nic(bp, 1);
8481         }
8482
8483         rtnl_unlock();
8484         return PCI_ERS_RESULT_RECOVERED;
8485 }
8486
8487 /**
8488  * bnx2_io_resume - called when traffic can start flowing again.
8489  * @pdev: Pointer to PCI device
8490  *
8491  * This callback is called when the error recovery driver tells us that
8492  * its OK to resume normal operation.
8493  */
8494 static void bnx2_io_resume(struct pci_dev *pdev)
8495 {
8496         struct net_device *dev = pci_get_drvdata(pdev);
8497         struct bnx2 *bp = netdev_priv(dev);
8498
8499         rtnl_lock();
8500         if (netif_running(dev))
8501                 bnx2_netif_start(bp, true);
8502
8503         netif_device_attach(dev);
8504         rtnl_unlock();
8505 }
8506
/* PCI error recovery callbacks (see Documentation/PCI/pci-error-recovery). */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
8512
/* Driver registration table: probe/remove and legacy PM entry points. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
8522
/* Module entry point: register the PCI driver with the core. */
static int __init bnx2_init(void)
{
        return pci_register_driver(&bnx2_pci_driver);
}
8527
/* Module exit point: unregister the PCI driver (invokes remove for
 * every bound device).
 */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
8532
/* Wire the init/exit functions into the module load/unload machinery. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8535
8536
8537