bnx2: Fix lost MSI-X problem on 5709 NICs.
[safe/jmp/linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.0.8"
62 #define DRV_MODULE_RELDATE      "Feb 15, 2010"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j9.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] __devinitdata =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Adapter model identifiers.  The values index board_info[] below and
 * are stored as the driver_data of each bnx2_pci_tbl entry.  Keep the
 * order in sync with board_info[].
 */
92 typedef enum {
93         BCM5706 = 0,
94         NC370T,
95         NC370I,
96         BCM5706S,
97         NC370F,
98         BCM5708,
99         BCM5708S,
100         BCM5709,
101         BCM5709S,
102         BCM5716,
103         BCM5716S,
104 } board_t;
105
106 /* indexed by board_t, above */
/* Marketing names printed at probe time; entry order must match the
 * board_t enum exactly.
 */
107 static struct {
108         char *name;
109 } board_info[] __devinitdata = {
110         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111         { "HP NC370T Multifunction Gigabit Server Adapter" },
112         { "HP NC370i Multifunction Gigabit Server Adapter" },
113         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114         { "HP NC370F Multifunction Gigabit Server Adapter" },
115         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121         };
122
/* PCI ID match table.  The HP OEM entries (specific subsystem IDs) must
 * come before the generic PCI_ANY_ID entries for the same device ID,
 * because the PCI core matches entries in order.
 */
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 0x163b/0x163c: 5716/5716S device IDs, no PCI_DEVICE_ID_NX2_*
         * constant existed for them at this point. */
142         { PCI_VENDOR_ID_BROADCOM, 0x163b,
143           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144         { PCI_VENDOR_ID_BROADCOM, 0x163c,
145           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146         { 0, }
147 };
148
/* NVRAM device descriptors for pre-5709 chips.  The first field of each
 * entry encodes the strap value read from the hardware; bnx2 code
 * presumably matches the detected strap against these entries to pick
 * the flash geometry (page bits/size, address mask, total size) —
 * confirm against the NVRAM init code elsewhere in this file.
 */
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
153         /* Slow EEPROM */
154         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157          "EEPROM - slow"},
158         /* Expansion entry 0001 */
159         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162          "Entry 0001"},
163         /* Saifun SA25F010 (non-buffered flash) */
164         /* strap, cfg1, & write1 need updates */
165         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168          "Non-buffered flash (128kB)"},
169         /* Saifun SA25F020 (non-buffered flash) */
170         /* strap, cfg1, & write1 need updates */
171         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174          "Non-buffered flash (256kB)"},
175         /* Expansion entry 0100 */
176         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179          "Entry 0100"},
180         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190         /* Saifun SA25F005 (non-buffered flash) */
191         /* strap, cfg1, & write1 need updates */
192         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195          "Non-buffered flash (64kB)"},
196         /* Fast EEPROM */
197         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200          "EEPROM - fast"},
201         /* Expansion entry 1001 */
202         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1001"},
206         /* Expansion entry 1010 */
207         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1010"},
211         /* ATMEL AT45DB011B (buffered flash) */
212         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215          "Buffered flash (128kB)"},
216         /* Expansion entry 1100 */
217         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220          "Entry 1100"},
221         /* Expansion entry 1101 */
222         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225          "Entry 1101"},
226         /* Ateml Expansion entry 1110 */
227         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230          "Entry 1110 (Atmel)"},
231         /* ATMEL AT45DB021B (buffered flash) */
232         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235          "Buffered flash (256kB)"},
236 };
237
/* The 5709 family has a single fixed NVRAM layout, so it uses this one
 * descriptor instead of strap-matching against flash_table[].
 */
238 static const struct flash_spec flash_5709 = {
239         .flags          = BNX2_NV_BUFFERED,
240         .page_bits      = BCM5709_FLASH_PAGE_BITS,
241         .page_size      = BCM5709_FLASH_PAGE_SIZE,
242         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
243         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
244         .name           = "5709 Buffered flash (256kB)",
245 };
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250
/* Return the number of free transmit BDs in @txr.  Called from both the
 * xmit path and the tx-completion path.
 */
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253         u32 diff;
254
        /* Full barrier before sampling tx_prod/tx_cons, so that updates
         * made on another CPU are observed here — NOTE(review): presumably
         * paired with a barrier at the producer/consumer update sites;
         * confirm against the xmit/tx-int code.
         */
255         smp_mb();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
        /* Free entries = configured ring size minus in-flight BDs. */
266         return (bp->tx_ring_size - diff);
267 }
268
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272         u32 val;
273
274         spin_lock_bh(&bp->indirect_lock);
275         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
276         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277         spin_unlock_bh(&bp->indirect_lock);
278         return val;
279 }
280
/* Indirectly write @val to a device register via the PCICFG register
 * window.  The address write must precede the data write, and both are
 * kept atomic w.r.t. other indirect accesses by indirect_lock.
 */
281 static void
282 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
283 {
284         spin_lock_bh(&bp->indirect_lock);
285         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
286         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
287         spin_unlock_bh(&bp->indirect_lock);
288 }
289
290 static void
291 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292 {
293         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294 }
295
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298 {
299         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300 }
301
/* Write @val into context memory at @cid_addr + @offset.  The 5709
 * family uses a request/ack register pair that must be polled for
 * completion; older chips take a plain address/data register write.
 */
302 static void
303 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
304 {
305         offset += cid_addr;
306         spin_lock_bh(&bp->indirect_lock);
307         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
308                 int i;
309
310                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
311                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
312                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Poll up to 5 x 5us for the WRITE_REQ bit to clear.  A
                 * timeout is silently ignored — NOTE(review): presumably
                 * the hardware always completes within this window.
                 */
313                 for (i = 0; i < 5; i++) {
314                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
315                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
316                                 break;
317                         udelay(5);
318                 }
319         } else {
320                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
321                 REG_WR(bp, BNX2_CTX_DATA, val);
322         }
323         spin_unlock_bh(&bp->indirect_lock);
324 }
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/* Fill in the single cnic IRQ slot (irq_arr[0]).  With MSI-X the cnic
 * gets its own vector/status block past the ones used by the net
 * driver; otherwise it shares vector 0 and polls via cnic_tag on the
 * default status block.
 */
349 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
350 {
351         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
352         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
353         int sb_id;
354
355         if (bp->flags & BNX2_FLAG_USING_MSIX) {
356                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
357                 bnapi->cnic_present = 0;
                /* cnic takes the vector just past the net driver's. */
358                 sb_id = bp->irq_nvecs;
359                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
360         } else {
361                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
362                 bnapi->cnic_tag = bnapi->last_status_idx;
363                 bnapi->cnic_present = 1;
364                 sb_id = 0;
365                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
366         }
367
368         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        /* Per-vector status blocks are laid out contiguously at
         * BNX2_SBLK_MSIX_ALIGN_SIZE intervals (see bnx2_alloc_mem). */
369         cp->irq_arr[0].status_blk = (void *)
370                 ((unsigned long) bnapi->status_blk.msi +
371                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
372         cp->irq_arr[0].status_blk_num = sb_id;
373         cp->num_irq = 1;
374 }
375
/* cnic attach entry point.  Publishes @ops for use by the RX/interrupt
 * path and records @data as the opaque cookie passed back to cnic_ctl.
 *
 * Returns 0 on success, -EINVAL for NULL ops, -EBUSY if already
 * registered.
 */
376 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
377                               void *data)
378 {
379         struct bnx2 *bp = netdev_priv(dev);
380         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
381
382         if (ops == NULL)
383                 return -EINVAL;
384
385         if (cp->drv_state & CNIC_DRV_STATE_REGD)
386                 return -EBUSY;
387
        /* cnic_data must be set before cnic_ops is published: readers
         * that see ops != NULL will dereference cnic_data. */
388         bp->cnic_data = data;
389         rcu_assign_pointer(bp->cnic_ops, ops);
390
391         cp->num_irq = 0;
392         cp->drv_state = CNIC_DRV_STATE_REGD;
393
394         bnx2_setup_cnic_irq_info(bp);
395
396         return 0;
397 }
398
/* cnic detach entry point.  Clears the registration under cnic_lock,
 * then waits for any in-flight RCU readers of cnic_ops to finish before
 * returning, so the caller may free the ops structure.  Always returns 0.
 */
399 static int bnx2_unregister_cnic(struct net_device *dev)
400 {
401         struct bnx2 *bp = netdev_priv(dev);
402         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
403         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
404
405         mutex_lock(&bp->cnic_lock);
406         cp->drv_state = 0;
407         bnapi->cnic_present = 0;
408         rcu_assign_pointer(bp->cnic_ops, NULL);
409         mutex_unlock(&bp->cnic_lock);
        /* Wait out existing RCU read-side users of the old cnic_ops. */
410         synchronize_rcu();
411         return 0;
412 }
413
414 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
415 {
416         struct bnx2 *bp = netdev_priv(dev);
417         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
418
419         cp->drv_owner = THIS_MODULE;
420         cp->chip_id = bp->chip_id;
421         cp->pdev = bp->pdev;
422         cp->io_base = bp->regview;
423         cp->drv_ctl = bnx2_drv_ctl;
424         cp->drv_register_cnic = bnx2_register_cnic;
425         cp->drv_unregister_cnic = bnx2_unregister_cnic;
426
427         return cp;
428 }
429 EXPORT_SYMBOL(bnx2_cnic_probe);
430
431 static void
432 bnx2_cnic_stop(struct bnx2 *bp)
433 {
434         struct cnic_ops *c_ops;
435         struct cnic_ctl_info info;
436
437         mutex_lock(&bp->cnic_lock);
438         c_ops = bp->cnic_ops;
439         if (c_ops) {
440                 info.cmd = CNIC_CTL_STOP_CMD;
441                 c_ops->cnic_ctl(bp->cnic_data, &info);
442         }
443         mutex_unlock(&bp->cnic_lock);
444 }
445
446 static void
447 bnx2_cnic_start(struct bnx2 *bp)
448 {
449         struct cnic_ops *c_ops;
450         struct cnic_ctl_info info;
451
452         mutex_lock(&bp->cnic_lock);
453         c_ops = bp->cnic_ops;
454         if (c_ops) {
455                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
457
458                         bnapi->cnic_tag = bnapi->last_status_idx;
459                 }
460                 info.cmd = CNIC_CTL_START_CMD;
461                 c_ops->cnic_ctl(bp->cnic_data, &info);
462         }
463         mutex_unlock(&bp->cnic_lock);
464 }
465
466 #else
467
/* No-op stubs used when CNIC support (BCM_CNIC) is not compiled in. */
468 static void
469 bnx2_cnic_stop(struct bnx2 *bp)
470 {
471 }
472
473 static void
474 bnx2_cnic_start(struct bnx2 *bp)
475 {
476 }
478 #endif
479
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 * If hardware auto-polling is active it must be suspended around the
 * manual MDIO transaction and re-enabled afterwards.
 *
 * Returns 0 on success, -EBUSY if the transaction never completed
 * (in which case *val is set to 0).
 */
480 static int
481 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
482 {
483         u32 val1;
484         int i, ret;
485
486         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
487                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
488                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
489
490                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back to flush the posted write before delaying. */
491                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
492
493                 udelay(40);
494         }
495
        /* Compose the MDIO read command: PHY address, register, and
         * START_BUSY to kick off the transaction. */
496         val1 = (bp->phy_addr << 21) | (reg << 16) |
497                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
498                 BNX2_EMAC_MDIO_COMM_START_BUSY;
499         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
500
        /* Poll for completion: up to 50 x 10us. */
501         for (i = 0; i < 50; i++) {
502                 udelay(10);
503
504                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
505                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
506                         udelay(5);
507
508                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
509                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
510
511                         break;
512                 }
513         }
514
515         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
516                 *val = 0x0;
517                 ret = -EBUSY;
518         }
519         else {
520                 *val = val1;
521                 ret = 0;
522         }
523
        /* Restore hardware auto-polling if we disabled it above. */
524         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
525                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
526                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
527
528                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
529                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
530
531                 udelay(40);
532         }
533
534         return ret;
535 }
536
/* Write @val to PHY register @reg over the EMAC MDIO interface,
 * suspending hardware auto-polling around the manual transaction as in
 * bnx2_read_phy().
 *
 * Returns 0 on success, -EBUSY if the transaction never completed.
 */
537 static int
538 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
539 {
540         u32 val1;
541         int i, ret;
542
543         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
544                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
545                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
546
547                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
548                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
549
550                 udelay(40);
551         }
552
        /* Compose the MDIO write command with the 16-bit data in the
         * low bits, then start the transaction. */
553         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
554                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
555                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
556         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
557
        /* Poll for completion: up to 50 x 10us. */
558         for (i = 0; i < 50; i++) {
559                 udelay(10);
560
561                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
562                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
563                         udelay(5);
564                         break;
565                 }
566         }
567
568         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
569                 ret = -EBUSY;
570         else
571                 ret = 0;
572
        /* Restore hardware auto-polling if we disabled it above. */
573         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
574                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
575                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
576
577                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
578                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
579
580                 udelay(40);
581         }
582
583         return ret;
584 }
585
/* Mask interrupts on every vector the driver owns. */
586 static void
587 bnx2_disable_int(struct bnx2 *bp)
588 {
589         int i;
590         struct bnx2_napi *bnapi;
591
592         for (i = 0; i < bp->irq_nvecs; i++) {
593                 bnapi = &bp->bnx2_napi[i];
594                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596         }
        /* Read back to flush the posted mask writes to the device. */
597         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598 }
599
/* Unmask interrupts on every vector, acking up to last_status_idx. */
600 static void
601 bnx2_enable_int(struct bnx2 *bp)
602 {
603         int i;
604         struct bnx2_napi *bnapi;
605
606         for (i = 0; i < bp->irq_nvecs; i++) {
607                 bnapi = &bp->bnx2_napi[i];
608
                /* Two-step ack sequence: first ack the index with the
                 * interrupt still masked, then write again with the mask
                 * bit cleared to re-enable the vector. */
609                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
610                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
611                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
612                        bnapi->last_status_idx);
613
614                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
615                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
616                        bnapi->last_status_idx);
617         }
        /* Force a coalescing tick so a pending event fires immediately. */
618         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
619 }
620
/* Mask all vectors and wait for any running interrupt handlers to
 * finish.  intr_sem is bumped first so handlers that race with us see
 * the device as quiesced (see bnx2_netif_start for the matching dec).
 */
621 static void
622 bnx2_disable_int_sync(struct bnx2 *bp)
623 {
624         int i;
625
626         atomic_inc(&bp->intr_sem);
627         if (!netif_running(bp->dev))
628                 return;
629
630         bnx2_disable_int(bp);
631         for (i = 0; i < bp->irq_nvecs; i++)
632                 synchronize_irq(bp->irq_tbl[i].vector);
633 }
634
635 static void
636 bnx2_napi_disable(struct bnx2 *bp)
637 {
638         int i;
639
640         for (i = 0; i < bp->irq_nvecs; i++)
641                 napi_disable(&bp->bnx2_napi[i].napi);
642 }
643
644 static void
645 bnx2_napi_enable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_enable(&bp->bnx2_napi[i].napi);
651 }
652
/* Quiesce the interface: stop cnic, NAPI and the TX queues, then mask
 * and synchronize all interrupt vectors.
 */
653 static void
654 bnx2_netif_stop(struct bnx2 *bp)
655 {
656         bnx2_cnic_stop(bp);
657         if (netif_running(bp->dev)) {
658                 int i;
659
660                 bnx2_napi_disable(bp);
661                 netif_tx_disable(bp->dev);
662                 /* prevent tx timeout */
                /* Refresh trans_start on every queue so the watchdog does
                 * not fire while TX is deliberately stopped. */
663                 for (i = 0; i <  bp->dev->num_tx_queues; i++) {
664                         struct netdev_queue *txq;
665
666                         txq = netdev_get_tx_queue(bp->dev, i);
667                         txq->trans_start = jiffies;
668                 }
669         }
670         bnx2_disable_int_sync(bp);
671 }
672
/* Undo bnx2_netif_stop().  intr_sem counts nested stop requests; the
 * interface is only restarted when the count drops back to zero.
 */
673 static void
674 bnx2_netif_start(struct bnx2 *bp)
675 {
676         if (atomic_dec_and_test(&bp->intr_sem)) {
677                 if (netif_running(bp->dev)) {
678                         netif_tx_wake_all_queues(bp->dev);
679                         bnx2_napi_enable(bp);
680                         bnx2_enable_int(bp);
681                         bnx2_cnic_start(bp);
682                 }
683         }
684 }
685
686 static void
687 bnx2_free_tx_mem(struct bnx2 *bp)
688 {
689         int i;
690
691         for (i = 0; i < bp->num_tx_rings; i++) {
692                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
693                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
694
695                 if (txr->tx_desc_ring) {
696                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
697                                             txr->tx_desc_ring,
698                                             txr->tx_desc_mapping);
699                         txr->tx_desc_ring = NULL;
700                 }
701                 kfree(txr->tx_buf_ring);
702                 txr->tx_buf_ring = NULL;
703         }
704 }
705
706 static void
707 bnx2_free_rx_mem(struct bnx2 *bp)
708 {
709         int i;
710
711         for (i = 0; i < bp->num_rx_rings; i++) {
712                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
713                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
714                 int j;
715
716                 for (j = 0; j < bp->rx_max_ring; j++) {
717                         if (rxr->rx_desc_ring[j])
718                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
719                                                     rxr->rx_desc_ring[j],
720                                                     rxr->rx_desc_mapping[j]);
721                         rxr->rx_desc_ring[j] = NULL;
722                 }
723                 vfree(rxr->rx_buf_ring);
724                 rxr->rx_buf_ring = NULL;
725
726                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
727                         if (rxr->rx_pg_desc_ring[j])
728                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
729                                                     rxr->rx_pg_desc_ring[j],
730                                                     rxr->rx_pg_desc_mapping[j]);
731                         rxr->rx_pg_desc_ring[j] = NULL;
732                 }
733                 vfree(rxr->rx_pg_ring);
734                 rxr->rx_pg_ring = NULL;
735         }
736 }
737
738 static int
739 bnx2_alloc_tx_mem(struct bnx2 *bp)
740 {
741         int i;
742
743         for (i = 0; i < bp->num_tx_rings; i++) {
744                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746
747                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748                 if (txr->tx_buf_ring == NULL)
749                         return -ENOMEM;
750
751                 txr->tx_desc_ring =
752                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
753                                              &txr->tx_desc_mapping);
754                 if (txr->tx_desc_ring == NULL)
755                         return -ENOMEM;
756         }
757         return 0;
758 }
759
760 static int
761 bnx2_alloc_rx_mem(struct bnx2 *bp)
762 {
763         int i;
764
765         for (i = 0; i < bp->num_rx_rings; i++) {
766                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
767                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
768                 int j;
769
770                 rxr->rx_buf_ring =
771                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
772                 if (rxr->rx_buf_ring == NULL)
773                         return -ENOMEM;
774
775                 memset(rxr->rx_buf_ring, 0,
776                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
777
778                 for (j = 0; j < bp->rx_max_ring; j++) {
779                         rxr->rx_desc_ring[j] =
780                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
781                                                      &rxr->rx_desc_mapping[j]);
782                         if (rxr->rx_desc_ring[j] == NULL)
783                                 return -ENOMEM;
784
785                 }
786
787                 if (bp->rx_pg_ring_size) {
788                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
789                                                   bp->rx_max_pg_ring);
790                         if (rxr->rx_pg_ring == NULL)
791                                 return -ENOMEM;
792
793                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
794                                bp->rx_max_pg_ring);
795                 }
796
797                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
798                         rxr->rx_pg_desc_ring[j] =
799                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
800                                                 &rxr->rx_pg_desc_mapping[j]);
801                         if (rxr->rx_pg_desc_ring[j] == NULL)
802                                 return -ENOMEM;
803
804                 }
805         }
806         return 0;
807 }
808
/* Release all DMA memory: TX/RX rings, 5709 context pages, and the
 * combined status + statistics block.  Safe to call on partially
 * allocated state (used by bnx2_alloc_mem's error path).
 */
809 static void
810 bnx2_free_mem(struct bnx2 *bp)
811 {
812         int i;
813         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
814
815         bnx2_free_tx_mem(bp);
816         bnx2_free_rx_mem(bp);
817
818         for (i = 0; i < bp->ctx_pages; i++) {
819                 if (bp->ctx_blk[i]) {
820                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
821                                             bp->ctx_blk[i],
822                                             bp->ctx_blk_mapping[i]);
823                         bp->ctx_blk[i] = NULL;
824                 }
825         }
        /* The status and stats blocks share one allocation; freeing the
         * status block pointer releases both (see bnx2_alloc_mem). */
826         if (bnapi->status_blk.msi) {
827                 pci_free_consistent(bp->pdev, bp->status_stats_size,
828                                     bnapi->status_blk.msi,
829                                     bp->status_blk_mapping);
830                 bnapi->status_blk.msi = NULL;
831                 bp->stats_blk = NULL;
832         }
833 }
834
/* Allocate all DMA memory for the device:
 *  - one combined allocation holding the status block(s) followed by
 *    the statistics block; with MSI-X, per-vector status blocks are
 *    laid out at BNX2_SBLK_MSIX_ALIGN_SIZE intervals,
 *  - context memory pages on 5709-family chips,
 *  - RX and TX ring memory.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
835 static int
836 bnx2_alloc_mem(struct bnx2 *bp)
837 {
838         int i, status_blk_size, err;
839         struct bnx2_napi *bnapi;
840         void *status_blk;
841
842         /* Combine status and statistics blocks into one allocation. */
843         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
844         if (bp->flags & BNX2_FLAG_MSIX_CAP)
845                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
846                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
847         bp->status_stats_size = status_blk_size +
848                                 sizeof(struct statistics_block);
849
850         status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
851                                           &bp->status_blk_mapping);
852         if (status_blk == NULL)
853                 goto alloc_mem_err;
854
855         memset(status_blk, 0, bp->status_stats_size);
856
        /* Vector 0 uses the base (MSI-style) status block. */
857         bnapi = &bp->bnx2_napi[0];
858         bnapi->status_blk.msi = status_blk;
859         bnapi->hw_tx_cons_ptr =
860                 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
861         bnapi->hw_rx_cons_ptr =
862                 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
863         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
864                 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
865                         struct status_block_msix *sblk;
866
867                         bnapi = &bp->bnx2_napi[i];
868
869                         sblk = (void *) (status_blk +
870                                          BNX2_SBLK_MSIX_ALIGN_SIZE * i);
871                         bnapi->status_blk.msix = sblk;
872                         bnapi->hw_tx_cons_ptr =
873                                 &sblk->status_tx_quick_consumer_index;
874                         bnapi->hw_rx_cons_ptr =
875                                 &sblk->status_rx_quick_consumer_index;
                        /* Vector number as expected by INT_ACK_CMD. */
876                         bnapi->int_num = i << 24;
877                 }
878         }
879
        /* Stats block lives immediately after the status block area. */
880         bp->stats_blk = status_blk + status_blk_size;
881
882         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
883
884         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
885                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
886                 if (bp->ctx_pages == 0)
887                         bp->ctx_pages = 1;
888                 for (i = 0; i < bp->ctx_pages; i++) {
889                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
890                                                 BCM_PAGE_SIZE,
891                                                 &bp->ctx_blk_mapping[i]);
892                         if (bp->ctx_blk[i] == NULL)
893                                 goto alloc_mem_err;
894                 }
895         }
896
897         err = bnx2_alloc_rx_mem(bp);
898         if (err)
899                 goto alloc_mem_err;
900
901         err = bnx2_alloc_tx_mem(bp);
902         if (err)
903                 goto alloc_mem_err;
904
905         return 0;
906
907 alloc_mem_err:
908         bnx2_free_mem(bp);
909         return -ENOMEM;
910 }
911
/* Translate the driver's current link state into the BNX2_LINK_STATUS
 * encoding and write it to shared memory for the bootcode.  Skipped
 * when the link is managed by the remote (management) firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Map speed/duplex onto the firmware's status codes. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* The MII BMSR latches link-down events; read it
                         * twice so the second read reflects current state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
970
971 static char *
972 bnx2_xceiver_str(struct bnx2 *bp)
973 {
974         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
975                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
976                  "Copper"));
977 }
978
979 static void
980 bnx2_report_link(struct bnx2 *bp)
981 {
982         if (bp->link_up) {
983                 netif_carrier_on(bp->dev);
984                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
985                             bnx2_xceiver_str(bp),
986                             bp->line_speed,
987                             bp->duplex == DUPLEX_FULL ? "full" : "half");
988
989                 if (bp->flow_ctrl) {
990                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
991                                 pr_cont(", receive ");
992                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
993                                         pr_cont("& transmit ");
994                         }
995                         else {
996                                 pr_cont(", transmit ");
997                         }
998                         pr_cont("flow control ON");
999                 }
1000                 pr_cont("\n");
1001         } else {
1002                 netif_carrier_off(bp->dev);
1003                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1004                            bnx2_xceiver_str(bp));
1005         }
1006
1007         bnx2_report_fw_link(bp);
1008 }
1009
/* Resolve the RX/TX pause (flow control) settings for the current link
 * and store the result in bp->flow_ctrl, following the IEEE 802.3
 * autonegotiation pause resolution rules.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* Unless both speed and flow control are autonegotiated, use
         * the administratively requested setting (full duplex only).
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* PAUSE frames are only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes reports the already-resolved pause result
         * directly in its 1000X status register.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* Translate the 1000BASE-X pause bits into the standard copper
         * encoding so one resolution table below handles both.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1085
/* Fill in bp->line_speed and bp->duplex for a 5709 SerDes PHY that has
 * link.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        /* The autoneg result lives in the GP_STATUS register block:
         * select it, read, then restore the default COMBO_IEEEB0 block.
         */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        /* Forced speed: report what was requested. */
        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
1124
1125 static int
1126 bnx2_5708s_linkup(struct bnx2 *bp)
1127 {
1128         u32 val;
1129
1130         bp->link_up = 1;
1131         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1132         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1133                 case BCM5708S_1000X_STAT1_SPEED_10:
1134                         bp->line_speed = SPEED_10;
1135                         break;
1136                 case BCM5708S_1000X_STAT1_SPEED_100:
1137                         bp->line_speed = SPEED_100;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_1G:
1140                         bp->line_speed = SPEED_1000;
1141                         break;
1142                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1143                         bp->line_speed = SPEED_2500;
1144                         break;
1145         }
1146         if (val & BCM5708S_1000X_STAT1_FD)
1147                 bp->duplex = DUPLEX_FULL;
1148         else
1149                 bp->duplex = DUPLEX_HALF;
1150
1151         return 0;
1152 }
1153
1154 static int
1155 bnx2_5706s_linkup(struct bnx2 *bp)
1156 {
1157         u32 bmcr, local_adv, remote_adv, common;
1158
1159         bp->link_up = 1;
1160         bp->line_speed = SPEED_1000;
1161
1162         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1163         if (bmcr & BMCR_FULLDPLX) {
1164                 bp->duplex = DUPLEX_FULL;
1165         }
1166         else {
1167                 bp->duplex = DUPLEX_HALF;
1168         }
1169
1170         if (!(bmcr & BMCR_ANENABLE)) {
1171                 return 0;
1172         }
1173
1174         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1175         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1176
1177         common = local_adv & remote_adv;
1178         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1179
1180                 if (common & ADVERTISE_1000XFULL) {
1181                         bp->duplex = DUPLEX_FULL;
1182                 }
1183                 else {
1184                         bp->duplex = DUPLEX_HALF;
1185                 }
1186         }
1187
1188         return 0;
1189 }
1190
/* Determine line speed and duplex for a copper PHY that has link.
 * With autoneg on, resolve from the common advertised abilities
 * (highest first); otherwise decode the forced BMCR bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                /* Check 1000BASE-T abilities first. */
                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* The link partner bits in MII_STAT1000 sit two bits
                 * above the MII_CTRL1000 advertisement bits, hence the
                 * shift before masking.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No GbE match; resolve 10/100 from the standard
                         * advertisement registers, best ability first.
                         */
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* Nothing in common - report no link. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg off: speed and duplex are forced via BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
1256
/* Program the L2 RX context for one ring.  On the 5709 this also sets
 * the ring-fill watermarks that control pause frame generation.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        /* NOTE(review): 0x02 << 8 looks like an additional context type
         * field - confirm against the 5709 programming documentation.
         */
        val |= 0x02 << 8;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 lo_water, hi_water;

                /* Only arm the low watermark (pause trigger) when TX
                 * flow control is enabled.
                 */
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
                else
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
                if (lo_water >= bp->rx_ring_size)
                        lo_water = 0;

                hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

                if (hi_water <= lo_water)
                        lo_water = 0;

                /* Scale down to the units the context field expects. */
                hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
                lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

                /* hi_water is a 4-bit field; clamp it, and disable the
                 * low watermark when the high one ends up zero.
                 */
                if (hi_water > 0xf)
                        hi_water = 0xf;
                else if (hi_water == 0)
                        lo_water = 0;
                val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
        }
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1292
1293 static void
1294 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1295 {
1296         int i;
1297         u32 cid;
1298
1299         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1300                 if (i == 1)
1301                         cid = RX_RSS_CID;
1302                 bnx2_init_rx_context(bp, cid);
1303         }
1304 }
1305
/* Program the EMAC to match the current link speed, duplex and flow
 * control state, then acknowledge the link-change interrupt.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* NOTE(review): 0x2620/0x26ff program the TX lengths (IPG/slot
         * time); 1000 Mbps half duplex needs the larger value - confirm
         * with the chip documentation.
         */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                /* Select the port mode for the negotiated speed. */
                switch (bp->line_speed) {
                        case SPEED_10:
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        /* The 5709 RX watermarks depend on bp->flow_ctrl; reprogram the
         * RX contexts now that flow control is resolved.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_all_rx_contexts(bp);
}
1373
1374 static void
1375 bnx2_enable_bmsr1(struct bnx2 *bp)
1376 {
1377         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1378             (CHIP_NUM(bp) == CHIP_NUM_5709))
1379                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1380                                MII_BNX2_BLK_ADDR_GP_STATUS);
1381 }
1382
1383 static void
1384 bnx2_disable_bmsr1(struct bnx2 *bp)
1385 {
1386         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1387             (CHIP_NUM(bp) == CHIP_NUM_5709))
1388                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1389                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1390 }
1391
/* Make sure 2.5G advertisement is enabled in the PHY.  Returns 1 if it
 * was already enabled; 0 if the PHY is not 2.5G-capable or the bit had
 * to be set (in which case the caller may need to restart autoneg).
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 1;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        if (bp->autoneg & AUTONEG_SPEED)
                bp->advertising |= ADVERTISED_2500baseX_Full;

        /* On the 5709 the UP1 register lives in the OVER1G block. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (!(up1 & BCM5708S_UP1_2G5)) {
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 0;
        }

        /* Restore the default register block on 5709. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}
1420
/* Clear the 2.5G advertisement bit in the PHY if set.  Returns 1 if
 * the bit was set and has now been cleared (link will renegotiate),
 * 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 0;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        /* On the 5709 the UP1 register lives in the OVER1G block. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (up1 & BCM5708S_UP1_2G5) {
                up1 &= ~BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 1;
        }

        /* Restore the default register block on 5709. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}
1446
/* Force the SerDes PHY to 2.5G.  The 5709 is forced through the
 * SERDES_DIG MISC1 register, the 5708 through a vendor BMCR bit; other
 * chips are left alone.  When speed autoneg was enabled, autoneg is
 * turned off in BMCR while the speed is forced.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* Select the SERDES_DIG block to reach MISC1, set the
                 * force-speed field to 2.5G, then restore the default
                 * block before touching BMCR.
                 */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr |= BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1483
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bit and, when speed autoneg is configured, re-enable and restart
 * autonegotiation.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* Clear the force bit in SERDES_DIG MISC1, then restore
                 * the default register block before touching BMCR.
                 */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1516
1517 static void
1518 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1519 {
1520         u32 val;
1521
1522         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1523         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1524         if (start)
1525                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1526         else
1527                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1528 }
1529
/* Re-evaluate the PHY link state and bring the driver and MAC state in
 * line with it.  Always returns 0.  In loopback modes the link is
 * simply faked up; with a remote PHY the firmware owns the link.
 * NOTE(review): callers appear to hold bp->phy_lock - confirm.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* BMSR latches link-down events; read twice for the current
         * state.  On 5709 SerDes the real status requires selecting the
         * GP_STATUS block first (enable/disable_bmsr1).
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                /* Release a previously forced-down link before sampling. */
                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                /* Read the AN_DBG shadow twice so the second read gives
                 * the current (unlatched) NOSYNC indication.
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                /* On 5706 SerDes, trust EMAC link + PCS sync over the
                 * latched BMSR value.
                 */
                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Per-chip decode of the negotiated speed/duplex. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link down: back out forced 2.5G and parallel-detect
                 * state so the next autoneg starts clean.
                 */
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log actual link transitions. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1613
1614 static int
1615 bnx2_reset_phy(struct bnx2 *bp)
1616 {
1617         int i;
1618         u32 reg;
1619
1620         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1621
1622 #define PHY_RESET_MAX_WAIT 100
1623         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1624                 udelay(10);
1625
1626                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1627                 if (!(reg & BMCR_RESET)) {
1628                         udelay(20);
1629                         break;
1630                 }
1631         }
1632         if (i == PHY_RESET_MAX_WAIT) {
1633                 return -EBUSY;
1634         }
1635         return 0;
1636 }
1637
1638 static u32
1639 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1640 {
1641         u32 adv = 0;
1642
1643         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1644                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1645
1646                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1647                         adv = ADVERTISE_1000XPAUSE;
1648                 }
1649                 else {
1650                         adv = ADVERTISE_PAUSE_CAP;
1651                 }
1652         }
1653         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1654                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1655                         adv = ADVERTISE_1000XPSE_ASYM;
1656                 }
1657                 else {
1658                         adv = ADVERTISE_PAUSE_ASYM;
1659                 }
1660         }
1661         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1662                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1663                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1664                 }
1665                 else {
1666                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1667                 }
1668         }
1669         return adv;
1670 }
1671
1672 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1673
/* Ask the management firmware to configure the link (remote PHY case).
 * Encodes the requested autoneg/speed/pause settings into a single
 * BNX2_NETLINK_SET_LINK argument word and issues the SET_LINK command.
 * Temporarily drops bp->phy_lock around the firmware handshake.
 * Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: advertise every enabled speed. */
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced speed: pick the one matching setting. */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        /* bnx2_fw_sync() waits for the firmware to acknowledge; the
         * phy_lock must not be held across that wait.
         */
        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1732
/* Configure the SerDes PHY for the current autoneg/forced-speed
 * settings.  Called with bp->phy_lock held; the lock is dropped
 * around sleeping sections.  Always returns 0 on the local-PHY path.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Remote-PHY capable devices delegate link setup to the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is presumably the
				 * 5709 forced-2.5G BMCR bit — confirm
				 * against the register definition.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Only restart autoneg when the advertisement changed or autoneg
	 * is not currently enabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1849
/* Advertised fibre modes: include 2.5G only on 2.5G-capable PHYs.
 * Note: expands using a local `bp` — only usable where `bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All speed/duplex modes advertised on copper ports. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register advertisement bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1864
1865 static void
1866 bnx2_set_default_remote_link(struct bnx2 *bp)
1867 {
1868         u32 link;
1869
1870         if (bp->phy_port == PORT_TP)
1871                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1872         else
1873                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1874
1875         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1876                 bp->req_line_speed = 0;
1877                 bp->autoneg |= AUTONEG_SPEED;
1878                 bp->advertising = ADVERTISED_Autoneg;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1880                         bp->advertising |= ADVERTISED_10baseT_Half;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1882                         bp->advertising |= ADVERTISED_10baseT_Full;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1884                         bp->advertising |= ADVERTISED_100baseT_Half;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1886                         bp->advertising |= ADVERTISED_100baseT_Full;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1888                         bp->advertising |= ADVERTISED_1000baseT_Full;
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1890                         bp->advertising |= ADVERTISED_2500baseX_Full;
1891         } else {
1892                 bp->autoneg = 0;
1893                 bp->advertising = 0;
1894                 bp->req_duplex = DUPLEX_FULL;
1895                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1896                         bp->req_line_speed = SPEED_10;
1897                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1898                                 bp->req_duplex = DUPLEX_HALF;
1899                 }
1900                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1901                         bp->req_line_speed = SPEED_100;
1902                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1903                                 bp->req_duplex = DUPLEX_HALF;
1904                 }
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1906                         bp->req_line_speed = SPEED_1000;
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1908                         bp->req_line_speed = SPEED_2500;
1909         }
1910 }
1911
1912 static void
1913 bnx2_set_default_link(struct bnx2 *bp)
1914 {
1915         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1916                 bnx2_set_default_remote_link(bp);
1917                 return;
1918         }
1919
1920         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1921         bp->req_line_speed = 0;
1922         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1923                 u32 reg;
1924
1925                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1926
1927                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1928                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1929                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1930                         bp->autoneg = 0;
1931                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1932                         bp->req_duplex = DUPLEX_FULL;
1933                 }
1934         } else
1935                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1936 }
1937
1938 static void
1939 bnx2_send_heart_beat(struct bnx2 *bp)
1940 {
1941         u32 msg;
1942         u32 addr;
1943
1944         spin_lock(&bp->indirect_lock);
1945         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1946         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1947         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1948         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1949         spin_unlock(&bp->indirect_lock);
1950 }
1951
/* Handle a link event reported by the firmware for a remote PHY:
 * decode the shared-memory link status word into bp->link_up,
 * line_speed, duplex, flow_ctrl and phy_port, then update the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case deliberately falls through to the FULL
		 * case of the same speed after overriding the duplex.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Reload defaults if the media type changed. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2028
2029 static int
2030 bnx2_set_remote_link(struct bnx2 *bp)
2031 {
2032         u32 evt_code;
2033
2034         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035         switch (evt_code) {
2036                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2037                         bnx2_remote_phy_event(bp);
2038                         break;
2039                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2040                 default:
2041                         bnx2_send_heart_beat(bp);
2042                         break;
2043         }
2044         return 0;
2045 }
2046
/* Program the copper PHY according to bp->autoneg and bp->req_*.
 * Called with bp->phy_lock held; the lock is dropped around the
 * forced-link-down delay.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits of the current
		 * advertisement so a no-op update is detectable below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when the advertisement changed
		 * or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link transitions; read twice so the
		 * second read reflects the current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2145
2146 static int
2147 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2148 __releases(&bp->phy_lock)
2149 __acquires(&bp->phy_lock)
2150 {
2151         if (bp->loopback == MAC_LOOPBACK)
2152                 return 0;
2153
2154         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2155                 return (bnx2_setup_serdes_phy(bp, port));
2156         }
2157         else {
2158                 return (bnx2_setup_copper_phy(bp));
2159         }
2160 }
2161
/* Initialize the 5709 SerDes PHY.  This PHY exposes the standard MII
 * registers at shifted offsets and uses a block-address scheme
 * (MII_BNX2_BLK_ADDR) to select register banks.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers live at offset +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; turn off media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the device is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2211
/* Initialize the 5708 SerDes PHY (BCM5708S). */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Select the IEEE register layout through the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G on capable parts. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM-configured TX control value, but only on
	 * backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2269
/* Initialize the 5706 SerDes PHY. */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 on 5706 only;
	 * value comes from the vendor init sequence — meaning not
	 * documented here.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* Registers 0x18/0x1c are presumably vendor shadow registers;
	 * the write selects the shadow, the read-modify-write updates
	 * it.  Values are from the vendor init sequence.
	 */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2307
/* Initialize the integrated copper (10/100/1000BASE-T) PHY. */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* NOTE(review): vendor-provided write sequence through
		 * registers 0x18/0x17/0x15 (presumably aux control and
		 * DSP access) working around a CRC erratum.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8 to disable
		 * the early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2359
2360
/* Probe the PHY, run the chip-specific init routine and apply the
 * current link settings.  Called with bp->phy_lock held; the lock may
 * be dropped temporarily inside bnx2_setup_phy.  Returns 0 or a
 * negative error code from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default to the standard MII register map; the 5709 SerDes
	 * init routine overrides these offsets.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote PHY: the firmware owns the PHY, skip local init. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY id from the two id registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2406
2407 static int
2408 bnx2_set_mac_loopback(struct bnx2 *bp)
2409 {
2410         u32 mac_mode;
2411
2412         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2413         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2414         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2415         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2416         bp->link_up = 1;
2417         return 0;
2418 }
2419
2420 static int bnx2_test_link(struct bnx2 *);
2421
/* Enable PHY loopback for self-test: force the PHY into loopback at
 * 1G full duplex, wait for the link, then put the MAC into normal
 * GMII mode.  Returns 0 or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY register access requires the PHY lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll up to ~1s for the loopback link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback/force bits and select the GMII port mode. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2451
/* Post a message to the firmware mailbox and optionally wait for the
 * acknowledgement.  Returns 0 on success (or when no ack is
 * requested), -EBUSY on ack timeout and -EIO when the firmware
 * reports a bad status.  Sleeps; must be called in process context
 * without spinlocks held.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Each message carries a fresh sequence number so the ack can
	 * be matched against it.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget; skip the status checks. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2496
/* Initialize 5709 context memory: kick off the chip's internal memory
 * init, then program the host page table with the DMA addresses of the
 * pre-allocated context pages in bp->ctx_blk[].
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and start memory init.  Bits 16+ carry
	 * the page-size code (log2(page size) - 8); the meaning of bit 12
	 * is not visible here -- it follows the vendor init sequence.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for MEM_INIT to self-clear (up to ~20us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages were allocated elsewhere; zero each before handing
		 * it to the chip.
		 */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the host
		 * page table entry, then issue the write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for WRITE_REQ to self-clear (up to ~50us). */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2544
/* Zero out all 96 on-chip contexts (non-5709 context model) by mapping
 * each context page through the CTX_VIRT_ADDR/CTX_PAGE_TBL window and
 * writing zeros word by word.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps physical context IDs: IDs with
			 * bit 3 set are relocated to the 0x60 region --
			 * presumably a chip erratum workaround (not
			 * documented here).
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE bytes, handled one physical
		 * page (PHY_CTX_SIZE) at a time.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Select the page, then zero it through the
			 * context window.
			 */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2587
2588 static int
2589 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2590 {
2591         u16 *good_mbuf;
2592         u32 good_mbuf_cnt;
2593         u32 val;
2594
2595         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2596         if (good_mbuf == NULL) {
2597                 pr_err("Failed to allocate memory in %s\n", __func__);
2598                 return -ENOMEM;
2599         }
2600
2601         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2602                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2603
2604         good_mbuf_cnt = 0;
2605
2606         /* Allocate a bunch of mbufs and save the good ones in an array. */
2607         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2608         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2609                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2610                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2611
2612                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2613
2614                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2615
2616                 /* The addresses with Bit 9 set are bad memory blocks. */
2617                 if (!(val & (1 << 9))) {
2618                         good_mbuf[good_mbuf_cnt] = (u16) val;
2619                         good_mbuf_cnt++;
2620                 }
2621
2622                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2623         }
2624
2625         /* Free the good ones back to the mbuf pool thus discarding
2626          * all the bad ones. */
2627         while (good_mbuf_cnt) {
2628                 good_mbuf_cnt--;
2629
2630                 val = good_mbuf[good_mbuf_cnt];
2631                 val = (val << 9) | val | 1;
2632
2633                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2634         }
2635         kfree(good_mbuf);
2636         return 0;
2637 }
2638
2639 static void
2640 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2641 {
2642         u32 val;
2643
2644         val = (mac_addr[0] << 8) | mac_addr[1];
2645
2646         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2647
2648         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2649                 (mac_addr[4] << 8) | mac_addr[5];
2650
2651         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2652 }
2653
2654 static inline int
2655 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2656 {
2657         dma_addr_t mapping;
2658         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2659         struct rx_bd *rxbd =
2660                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2661         struct page *page = alloc_page(GFP_ATOMIC);
2662
2663         if (!page)
2664                 return -ENOMEM;
2665         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2666                                PCI_DMA_FROMDEVICE);
2667         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2668                 __free_page(page);
2669                 return -EIO;
2670         }
2671
2672         rx_pg->page = page;
2673         pci_unmap_addr_set(rx_pg, mapping, mapping);
2674         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676         return 0;
2677 }
2678
2679 static void
2680 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2681 {
2682         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2683         struct page *page = rx_pg->page;
2684
2685         if (!page)
2686                 return;
2687
2688         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689                        PCI_DMA_FROMDEVICE);
2690
2691         __free_page(page);
2692         rx_pg->page = NULL;
2693 }
2694
2695 static inline int
2696 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2697 {
2698         struct sk_buff *skb;
2699         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2700         dma_addr_t mapping;
2701         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2702         unsigned long align;
2703
2704         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2705         if (skb == NULL) {
2706                 return -ENOMEM;
2707         }
2708
2709         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2710                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2711
2712         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2713                 PCI_DMA_FROMDEVICE);
2714         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2715                 dev_kfree_skb(skb);
2716                 return -EIO;
2717         }
2718
2719         rx_buf->skb = skb;
2720         pci_unmap_addr_set(rx_buf, mapping, mapping);
2721
2722         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2724
2725         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2726
2727         return 0;
2728 }
2729
2730 static int
2731 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2732 {
2733         struct status_block *sblk = bnapi->status_blk.msi;
2734         u32 new_link_state, old_link_state;
2735         int is_set = 1;
2736
2737         new_link_state = sblk->status_attn_bits & event;
2738         old_link_state = sblk->status_attn_bits_ack & event;
2739         if (new_link_state != old_link_state) {
2740                 if (new_link_state)
2741                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2742                 else
2743                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2744         } else
2745                 is_set = 0;
2746
2747         return is_set;
2748 }
2749
2750 static void
2751 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2752 {
2753         spin_lock(&bp->phy_lock);
2754
2755         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2756                 bnx2_set_link(bp);
2757         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2758                 bnx2_set_remote_link(bp);
2759
2760         spin_unlock(&bp->phy_lock);
2761
2762 }
2763
2764 static inline u16
2765 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2766 {
2767         u16 cons;
2768
2769         /* Tell compiler that status block fields can change. */
2770         barrier();
2771         cons = *bnapi->hw_tx_cons_ptr;
2772         barrier();
2773         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2774                 cons++;
2775         return cons;
2776 }
2777
/* Reclaim completed tx descriptors for this napi instance's tx ring,
 * up to @budget packets: unmap the head and all fragment mappings,
 * free the skbs, and wake the tx queue if it was stopped and enough
 * ring space has opened up.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One tx queue per bnx2_napi instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index one past this packet's last BD, adjusted
			 * for the skipped last entry of a ring page.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if not all of this packet's BDs are done
			 * yet (signed compare handles index wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD following the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up completions that arrived while we were working. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with the
		 * xmit path stopping the queue.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2868
/* Recycle @count entries of the rx page ring by moving page pointers
 * and DMA mappings from the consumer slots back to the producer slots
 * (no new allocation).  If @skb is non-NULL, its last frag page is
 * first returned to the current consumer slot and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag page from the skb and put it back
		 * in the ring before freeing the skb.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* When cons == prod the slot already holds its own page;
		 * otherwise move page, mapping and BD address across.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2924
/* Recycle an rx buffer without allocating: hand @skb from the @cons
 * slot back to the device at the @prod slot, re-syncing the header
 * bytes for device access and copying the DMA mapping and BD address
 * across when cons != prod.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area (synced for the CPU in bnx2_rx_int())
	 * back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2954
/* Finish building a received skb.  Replenishes the rx ring at the
 * producer slot, unmaps the buffer, and for split/jumbo packets
 * (hdr_len != 0) attaches pages from the page ring as frags.
 *
 * @len:      packet length excluding the 4-byte FCS (caller already
 *            subtracted it); the page-ring math below uses the raw
 *            on-wire length (len + 4) because the split sizing was
 *            done on the raw length, then trims the FCS from the last
 *            fragment
 * @hdr_len:  linear header size for split packets, 0 for normal ones
 * @ring_idx: (cons << 16) | prod ring indices
 *
 * Returns 0 on success or a negative errno if replenishing failed, in
 * which case the buffer (and any needed pages) are recycled instead.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill: recycle this buffer and, for split
		 * packets, the page-ring entries it would have consumed.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes beyond the linear header, including the FCS. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only FCS bytes remain: recycle the unused
				 * pages and trim the FCS tail that spilled
				 * into the previous frag (or linear area).
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* drop the FCS */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Refill failed: roll back indices and let
				 * the reuse path reclaim the attached page
				 * and free the skb.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3053
3054 static inline u16
3055 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3056 {
3057         u16 cons;
3058
3059         /* Tell compiler that status block fields can change. */
3060         barrier();
3061         cons = *bnapi->hw_rx_cons_ptr;
3062         barrier();
3063         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3064                 cons++;
3065         return cons;
3066 }
3067
/* RX fast path: process up to @budget received packets from this napi
 * instance's rx ring.  Small packets are copied into a fresh skb and
 * the original buffer recycled; larger/split ones go through
 * bnx2_rx_skb().  Handles VLAN tag extraction, checksum offload
 * results, and finally publishes the new producer indices to the chip.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header for the CPU; the device's
		 * l2_fhdr sits at the front of the buffer.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine how much of the packet is in the linear
		 * buffer; the rest (if any) lives in the page ring.
		 * For split packets the header length is reported in
		 * the l2_fhdr_ip_xsum field.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling the buffer and any
		 * page-ring entries they would have consumed.
		 */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* strip the FCS */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original buffer (cheaper than remapping).
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: re-insert the
				 * stripped 802.1Q header into the packet.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (0x8100 == ETH_P_8021Q).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when it flagged the
		 * packet as TCP/UDP and reported no checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3243
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts before scheduling NAPI. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3266
/* One-shot MSI ISR: unlike bnx2_msi(), no explicit mask write is done
 * here before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3283
/* INTx (legacy, possibly shared) ISR.  Must detect whether the
 * interrupt is actually ours before acking, since the line may be
 * shared with another device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing only if NAPI was not
	 * already scheduled for this instance.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3322
3323 static inline int
3324 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3325 {
3326         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3327         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3328
3329         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3330             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3331                 return 1;
3332         return 0;
3333 }
3334
/* Attention-type status bits that require slow-path processing. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return 1 if any RX/TX, CNIC, or attention work is pending. */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* CNIC work is pending when its last-handled tag lags the
	 * current status index.
	 */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention event is pending when the asserted bits differ
	 * from the acknowledged bits.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3357
/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the previous check, pulse the MSI control
 * enable bit to re-arm the interrupt and run the MSI handler directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off and back on, then service
			 * the pending events ourselves.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3379
#ifdef BCM_CNIC
/* Let the CNIC offload driver process its events for this vector.
 * bp->cnic_ops is read under RCU, so this is safe against concurrent
 * (un)registration of the CNIC handler.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3396
/* Handle pending link/attention events reported in the status block. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Asserted bits differing from acknowledged bits means a new
	 * attention event is pending.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush */
	}
}
3416
3417 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3418                           int work_done, int budget)
3419 {
3420         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3421         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3422
3423         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3424                 bnx2_tx_int(bp, bnapi, 0);
3425
3426         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3427                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3428
3429         return work_done;
3430 }
3431
/* NAPI poll handler for MSI-X vectors: fast-path RX/TX only; link and
 * CNIC events are handled by vector 0's bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack up to last_status_idx for this vector,
			 * which re-arms its interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3458
/* NAPI poll handler for INTx/MSI (single vector): handles link
 * attention events, fast-path RX/TX, and (if built) CNIC work.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack write with the
				 * new index re-arms the interrupt.
				 */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack first with the line masked, then again
			 * unmasked.  NOTE(review): presumably avoids a
			 * spurious INTx re-assertion during the index
			 * update — confirm against chip documentation.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3507
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the chip's receive filters (promiscuous, multicast hash,
 * unicast match and VLAN-tag keeping) from dev->flags and the device's
 * address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in received frames only when no vlan group is
	 * registered and the chip supports it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit of the hash. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(mclist, dev) {
			/* The low 8 CRC bits select one of 256 hash bits:
			 * the top 3 pick the register, the low 5 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for exact matching: fall back to
	 * promiscuous sorting.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new mode, then re-enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3601
3602 static int __devinit
3603 check_fw_section(const struct firmware *fw,
3604                  const struct bnx2_fw_file_section *section,
3605                  u32 alignment, bool non_empty)
3606 {
3607         u32 offset = be32_to_cpu(section->offset);
3608         u32 len = be32_to_cpu(section->len);
3609
3610         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3611                 return -EINVAL;
3612         if ((non_empty && len == 0) || len > fw->size - offset ||
3613             len & (alignment - 1))
3614                 return -EINVAL;
3615         return 0;
3616 }
3617
3618 static int __devinit
3619 check_mips_fw_entry(const struct firmware *fw,
3620                     const struct bnx2_mips_fw_file_entry *entry)
3621 {
3622         if (check_fw_section(fw, &entry->text, 4, true) ||
3623             check_fw_section(fw, &entry->data, 4, false) ||
3624             check_fw_section(fw, &entry->rodata, 4, false))
3625                 return -EINVAL;
3626         return 0;
3627 }
3628
3629 static int __devinit
3630 bnx2_request_firmware(struct bnx2 *bp)
3631 {
3632         const char *mips_fw_file, *rv2p_fw_file;
3633         const struct bnx2_mips_fw_file *mips_fw;
3634         const struct bnx2_rv2p_fw_file *rv2p_fw;
3635         int rc;
3636
3637         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3638                 mips_fw_file = FW_MIPS_FILE_09;
3639                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3640                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3641                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3642                 else
3643                         rv2p_fw_file = FW_RV2P_FILE_09;
3644         } else {
3645                 mips_fw_file = FW_MIPS_FILE_06;
3646                 rv2p_fw_file = FW_RV2P_FILE_06;
3647         }
3648
3649         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3650         if (rc) {
3651                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3652                 return rc;
3653         }
3654
3655         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3656         if (rc) {
3657                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3658                 return rc;
3659         }
3660         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3661         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3662         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3663             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3664             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3665             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3666             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3667             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3668                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3669                 return -EINVAL;
3670         }
3671         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3672             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3673             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3674                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3675                 return -EINVAL;
3676         }
3677
3678         return 0;
3679 }
3680
3681 static u32
3682 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3683 {
3684         switch (idx) {
3685         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3686                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3687                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3688                 break;
3689         }
3690         return rv2p_code;
3691 }
3692
/* Download one RV2P processor's image.  Each 64-bit instruction is
 * written as a HIGH/LOW register pair and committed at its index via
 * the processor's ADDR_CMD register.  Firmware-supplied fixups are
 * then re-applied on top, and the processor is left in reset
 * (un-stalled later during chip init).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command value and address register for this
	 * processor.
	 */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image 8 bytes (one instruction) at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;	/* instruction index | write cmd */
		REG_WR(bp, addr, val);
	}

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		/* Each fixup entry holds the dword location of the low
		 * word of an instruction to patch; 0 means unused.
		 */
		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;	/* back to instruction index */
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3752
3753 static int
3754 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3755             const struct bnx2_mips_fw_file_entry *fw_entry)
3756 {
3757         u32 addr, len, file_offset;
3758         __be32 *data;
3759         u32 offset;
3760         u32 val;
3761
3762         /* Halt the CPU. */
3763         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3764         val |= cpu_reg->mode_value_halt;
3765         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3766         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3767
3768         /* Load the Text area. */
3769         addr = be32_to_cpu(fw_entry->text.addr);
3770         len = be32_to_cpu(fw_entry->text.len);
3771         file_offset = be32_to_cpu(fw_entry->text.offset);
3772         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3773
3774         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3775         if (len) {
3776                 int j;
3777
3778                 for (j = 0; j < (len / 4); j++, offset += 4)
3779                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3780         }
3781
3782         /* Load the Data area. */
3783         addr = be32_to_cpu(fw_entry->data.addr);
3784         len = be32_to_cpu(fw_entry->data.len);
3785         file_offset = be32_to_cpu(fw_entry->data.offset);
3786         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3787
3788         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3789         if (len) {
3790                 int j;
3791
3792                 for (j = 0; j < (len / 4); j++, offset += 4)
3793                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3794         }
3795
3796         /* Load the Read-Only area. */
3797         addr = be32_to_cpu(fw_entry->rodata.addr);
3798         len = be32_to_cpu(fw_entry->rodata.len);
3799         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3800         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3801
3802         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3803         if (len) {
3804                 int j;
3805
3806                 for (j = 0; j < (len / 4); j++, offset += 4)
3807                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3808         }
3809
3810         /* Clear the pre-fetch instruction. */
3811         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3812
3813         val = be32_to_cpu(fw_entry->start_addr);
3814         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3815
3816         /* Start the CPU. */
3817         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3818         val &= ~cpu_reg->mode_value_halt;
3819         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3820         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3821
3822         return 0;
3823 }
3824
3825 static int
3826 bnx2_init_cpus(struct bnx2 *bp)
3827 {
3828         const struct bnx2_mips_fw_file *mips_fw =
3829                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3830         const struct bnx2_rv2p_fw_file *rv2p_fw =
3831                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3832         int rc;
3833
3834         /* Initialize the RV2P processor. */
3835         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3836         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3837
3838         /* Initialize the RX Processor. */
3839         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3840         if (rc)
3841                 goto init_cpu_err;
3842
3843         /* Initialize the TX Processor. */
3844         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3845         if (rc)
3846                 goto init_cpu_err;
3847
3848         /* Initialize the TX Patch-up Processor. */
3849         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3850         if (rc)
3851                 goto init_cpu_err;
3852
3853         /* Initialize the Completion Processor. */
3854         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3855         if (rc)
3856                 goto init_cpu_err;
3857
3858         /* Initialize the Command Processor. */
3859         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3860
3861 init_cpu_err:
3862         return rc;
3863 }
3864
/* Move the chip between PCI power states.  Only D0 and D3hot are
 * supported; for D3hot, the MAC is optionally configured for
 * Wake-on-LAN before power is dropped.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and the PME status bit to
		 * return to full power.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the magic-packet / ACPI wake configuration. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* On copper, temporarily force 10/100 autoneg so
			 * the link can stay up at low power, then restore
			 * the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast; disable then
			 * re-enable sorting with the new mode.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (with or without
		 * WoL), unless WoL is not supported at all.
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): 5706 A0/A1 only enter D3hot when
			 * WoL is enabled — presumably an early-stepping
			 * errata; confirm against chip documentation.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4002
4003 static int
4004 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4005 {
4006         u32 val;
4007         int j;
4008
4009         /* Request access to the flash interface. */
4010         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4011         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4012                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4013                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4014                         break;
4015
4016                 udelay(5);
4017         }
4018
4019         if (j >= NVRAM_TIMEOUT_COUNT)
4020                 return -EBUSY;
4021
4022         return 0;
4023 }
4024
4025 static int
4026 bnx2_release_nvram_lock(struct bnx2 *bp)
4027 {
4028         int j;
4029         u32 val;
4030
4031         /* Relinquish nvram interface. */
4032         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4033
4034         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4037                         break;
4038
4039                 udelay(5);
4040         }
4041
4042         if (j >= NVRAM_TIMEOUT_COUNT)
4043                 return -EBUSY;
4044
4045         return 0;
4046 }
4047
4048
4049 static int
4050 bnx2_enable_nvram_write(struct bnx2 *bp)
4051 {
4052         u32 val;
4053
4054         val = REG_RD(bp, BNX2_MISC_CFG);
4055         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4056
4057         if (bp->flash_info->flags & BNX2_NV_WREN) {
4058                 int j;
4059
4060                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4061                 REG_WR(bp, BNX2_NVM_COMMAND,
4062                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4063
4064                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4065                         udelay(5);
4066
4067                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4068                         if (val & BNX2_NVM_COMMAND_DONE)
4069                                 break;
4070                 }
4071
4072                 if (j >= NVRAM_TIMEOUT_COUNT)
4073                         return -EBUSY;
4074         }
4075         return 0;
4076 }
4077
4078 static void
4079 bnx2_disable_nvram_write(struct bnx2 *bp)
4080 {
4081         u32 val;
4082
4083         val = REG_RD(bp, BNX2_MISC_CFG);
4084         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4085 }
4086
4087
4088 static void
4089 bnx2_enable_nvram_access(struct bnx2 *bp)
4090 {
4091         u32 val;
4092
4093         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4094         /* Enable both bits, even on read. */
4095         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4096                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4097 }
4098
4099 static void
4100 bnx2_disable_nvram_access(struct bnx2 *bp)
4101 {
4102         u32 val;
4103
4104         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4105         /* Disable both bits, even after read. */
4106         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4107                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4108                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4109 }
4110
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts, which do not require erase-before-write.  Returns 0 on
 * success (or no-op), -EBUSY on completion timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4150
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * The word is stored in big-endian byte order (cpu_to_be32 before the
 * memcpy below).  @cmd_flags carries the FIRST/LAST framing bits for
 * multi-word access sequences.  Returns 0 on success or -EBUSY if the
 * controller never signals DONE.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, then fetch the data word. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4194
4195
/* Write one 32-bit word from @val (4 bytes, big-endian as stored in
 * flash) to NVRAM at @offset.  @cmd_flags carries the FIRST/LAST
 * framing bits for multi-word sequences.  Caller must hold the NVRAM
 * lock with write access enabled.  Returns 0 on success or -EBUSY if
 * the controller never signals DONE.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy avoids an unaligned/aliasing access on @val. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, polling every 5 usec. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4239
/* Identify the attached flash/EEPROM part, record its spec in
 * bp->flash_info, and determine bp->flash_size.  The 5709 always uses
 * the fixed flash_5709 spec; other chips are matched against
 * flash_table by the strapping bits read from NVM_CFG1, and the flash
 * interface is reconfigured if it has not been set up yet.  Returns 0
 * on success, a lock-acquisition error, or -ENODEV when no table
 * entry matches.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 30 set means the flash interface was already
	 * reconfigured (match on config1); register bit meanings are not
	 * visible here -- confirm against the NVM_CFG1 definition. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* NOTE(review): bit 23 presumably selects the backup
		 * strapping set -- confirm against the NVM_CFG1 layout. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hardware config; fall back
	 * to the table entry's total_size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4322
4323 static int
4324 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4325                 int buf_size)
4326 {
4327         int rc = 0;
4328         u32 cmd_flags, offset32, len32, extra;
4329
4330         if (buf_size == 0)
4331                 return 0;
4332
4333         /* Request access to the flash interface. */
4334         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4335                 return rc;
4336
4337         /* Enable access to flash interface */
4338         bnx2_enable_nvram_access(bp);
4339
4340         len32 = buf_size;
4341         offset32 = offset;
4342         extra = 0;
4343
4344         cmd_flags = 0;
4345
4346         if (offset32 & 3) {
4347                 u8 buf[4];
4348                 u32 pre_len;
4349
4350                 offset32 &= ~3;
4351                 pre_len = 4 - (offset & 3);
4352
4353                 if (pre_len >= len32) {
4354                         pre_len = len32;
4355                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4356                                     BNX2_NVM_COMMAND_LAST;
4357                 }
4358                 else {
4359                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4360                 }
4361
4362                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4363
4364                 if (rc)
4365                         return rc;
4366
4367                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4368
4369                 offset32 += 4;
4370                 ret_buf += pre_len;
4371                 len32 -= pre_len;
4372         }
4373         if (len32 & 3) {
4374                 extra = 4 - (len32 & 3);
4375                 len32 = (len32 + 4) & ~3;
4376         }
4377
4378         if (len32 == 4) {
4379                 u8 buf[4];
4380
4381                 if (cmd_flags)
4382                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4383                 else
4384                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4385                                     BNX2_NVM_COMMAND_LAST;
4386
4387                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4388
4389                 memcpy(ret_buf, buf, 4 - extra);
4390         }
4391         else if (len32 > 0) {
4392                 u8 buf[4];
4393
4394                 /* Read the first word. */
4395                 if (cmd_flags)
4396                         cmd_flags = 0;
4397                 else
4398                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4399
4400                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4401
4402                 /* Advance to the next dword. */
4403                 offset32 += 4;
4404                 ret_buf += 4;
4405                 len32 -= 4;
4406
4407                 while (len32 > 4 && rc == 0) {
4408                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4409
4410                         /* Advance to the next dword. */
4411                         offset32 += 4;
4412                         ret_buf += 4;
4413                         len32 -= 4;
4414                 }
4415
4416                 if (rc)
4417                         return rc;
4418
4419                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4420                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4421
4422                 memcpy(ret_buf, buf, 4 - extra);
4423         }
4424
4425         /* Disable access to flash interface */
4426         bnx2_disable_nvram_access(bp);
4427
4428         bnx2_release_nvram_lock(bp);
4429
4430         return rc;
4431 }
4432
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned head/tail bytes are handled read-modify-write: the
 * surrounding dwords are read first and merged into an aligned copy
 * of the data.  For non-buffered flash, each page touched is read
 * into a bounce buffer, erased, and rewritten with the merged data;
 * buffered (EEPROM-style) parts are written directly.  The NVRAM lock
 * is acquired and released once per page.  Returns 0 on success or a
 * negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: preserve the dword containing the first byte. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: preserve the dword containing the last byte. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned buffer merging the preserved head/tail bytes
	 * with the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page bounce buffer for the
	 * erase/rewrite cycle.  NOTE(review): 264 is presumably the
	 * largest page_size in flash_table -- confirm. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers can be freed
	 * unconditionally. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4612
/* Read the firmware capability mailbox and negotiate optional
 * features.  VLAN keeping is always allowed when ASF is disabled,
 * and additionally when the firmware advertises it.  For SerDes ports
 * with remote-PHY capability, the PHY port type (fibre vs. TP) is
 * taken from the firmware link status.  Accepted capabilities are
 * acknowledged back to the firmware only while the interface is up.
 * Caller holds bp->phy_lock (see bnx2_reset_chip()).
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; flags are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	/* Bail out if the firmware mailbox signature is absent. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Port type follows the firmware-reported link status. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4652
/* Map the MSI-X table and PBA through the chip's GRC windows:
 * separate-window mode is selected first, then window 2 is pointed at
 * the MSI-X table and window 3 at the pending-bit array.  Called from
 * bnx2_reset_chip() when MSI-X is in use, since a chip reset loses
 * this mapping.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4661
/* Soft-reset the chip after handshaking with the bootcode firmware.
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason forwarded to the
 * firmware.  The 5709 resets via MISC_COMMAND; older chips use
 * PCICFG_MISC_CONFIG and need special handling on 5706 A0/A1 (no
 * register reads until the reset settles).  Afterwards, firmware
 * capabilities are re-read and, when MSI-X is in use, the MSI-X table
 * mapping is reprogrammed.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset through the MISC command register, then
		 * restore the config-window settings via PCI config space
		 * (memory-mapped access may not be reliable mid-reset). */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the port type may change when
	 * remote-PHY is in effect. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Restore the MSI-X table mapping (presumably lost across
		 * the reset -- see bnx2_setup_msix_tbl). */
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4771
/* Bring the chip up after a reset: configure DMA byte/word swapping,
 * load the internal CPU firmware, init NVRAM info, program the MAC
 * address, MTU and backoff seed, set up the host coalescing block
 * (per-vector parameters when MSI-X is in use), clear statistics, and
 * finally tell the bootcode that initialization is complete.  Returns
 * 0 on success or a negative errno from firmware/CPU init.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA_CONFIG tuning bits; their
	 * meaning is not visible in this file. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Download firmware to the on-chip CPUs. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address bytes. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF sizing is based on at least the standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status/statistics block and per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Give the chip the DMA addresses of the status and statistics
	 * blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing parameters for the default vector. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing parameters for the additional MSI-X
	 * vectors (vector 0 was programmed above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode we are done initializing. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4996
4997 static void
4998 bnx2_clear_ring_states(struct bnx2 *bp)
4999 {
5000         struct bnx2_napi *bnapi;
5001         struct bnx2_tx_ring_info *txr;
5002         struct bnx2_rx_ring_info *rxr;
5003         int i;
5004
5005         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5006                 bnapi = &bp->bnx2_napi[i];
5007                 txr = &bnapi->tx_ring;
5008                 rxr = &bnapi->rx_ring;
5009
5010                 txr->tx_cons = 0;
5011                 txr->hw_tx_cons = 0;
5012                 rxr->rx_prod_bseq = 0;
5013                 rxr->rx_prod = 0;
5014                 rxr->rx_cons = 0;
5015                 rxr->rx_pg_prod = 0;
5016                 rxr->rx_pg_cons = 0;
5017         }
5018 }
5019
5020 static void
5021 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5022 {
5023         u32 val, offset0, offset1, offset2, offset3;
5024         u32 cid_addr = GET_CID_ADDR(cid);
5025
5026         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5027                 offset0 = BNX2_L2CTX_TYPE_XI;
5028                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5029                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5030                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5031         } else {
5032                 offset0 = BNX2_L2CTX_TYPE;
5033                 offset1 = BNX2_L2CTX_CMD_TYPE;
5034                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5035                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5036         }
5037         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5038         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5039
5040         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5041         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5042
5043         val = (u64) txr->tx_desc_mapping >> 32;
5044         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5045
5046         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5047         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5048 }
5049
5050 static void
5051 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5052 {
5053         struct tx_bd *txbd;
5054         u32 cid = TX_CID;
5055         struct bnx2_napi *bnapi;
5056         struct bnx2_tx_ring_info *txr;
5057
5058         bnapi = &bp->bnx2_napi[ring_num];
5059         txr = &bnapi->tx_ring;
5060
5061         if (ring_num == 0)
5062                 cid = TX_CID;
5063         else
5064                 cid = TX_TSS_CID + ring_num - 1;
5065
5066         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5067
5068         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5069
5070         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5071         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5072
5073         txr->tx_prod = 0;
5074         txr->tx_prod_bseq = 0;
5075
5076         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5077         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5078
5079         bnx2_init_tx_context(bp, cid, txr);
5080 }
5081
5082 static void
5083 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5084                      int num_rings)
5085 {
5086         int i;
5087         struct rx_bd *rxbd;
5088
5089         for (i = 0; i < num_rings; i++) {
5090                 int j;
5091
5092                 rxbd = &rx_ring[i][0];
5093                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5094                         rxbd->rx_bd_len = buf_size;
5095                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5096                 }
5097                 if (i == (num_rings - 1))
5098                         j = 0;
5099                 else
5100                         j = i + 1;
5101                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5102                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5103         }
5104 }
5105
/* Program the RX (and optional RX page) ring context for one ring and
 * pre-fill the rings with buffers.  The context writes must happen before
 * the producer mailboxes are written at the end.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context; RSS rings get their own CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring; overwritten below when jumbo pages are on. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Base DMA address of the page ring, split into 32-bit halves. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base DMA address of the normal RX ring. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; a partial fill is tolerated with a warning. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Cache fast-path mailbox addresses for this ring. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the hardware about the buffers we just posted. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5191
/* Initialize every TX and RX ring and, when multiple RX rings are in use,
 * program the RSS indirection table and enable RSS hashing.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)initialized. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Likewise disable RSS while the RX rings are set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Pack the indirection table four 1-byte entries at a time
		 * into tbl_32 and write each completed word big-endian.
		 * Entries spread traffic over the non-default RX rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5236
5237 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5238 {
5239         u32 max, num_rings = 1;
5240
5241         while (ring_size > MAX_RX_DESC_CNT) {
5242                 ring_size -= MAX_RX_DESC_CNT;
5243                 num_rings++;
5244         }
5245         /* round to next power of 2 */
5246         max = max_size;
5247         while ((max & num_rings) == 0)
5248                 max >>= 1;
5249
5250         if (num_rings != max)
5251                 max <<= 1;
5252
5253         return max;
5254 }
5255
5256 static void
5257 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5258 {
5259         u32 rx_size, rx_space, jumbo_size;
5260
5261         /* 8 for CRC and VLAN */
5262         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5263
5264         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5265                 sizeof(struct skb_shared_info);
5266
5267         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5268         bp->rx_pg_ring_size = 0;
5269         bp->rx_max_pg_ring = 0;
5270         bp->rx_max_pg_ring_idx = 0;
5271         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5272                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5273
5274                 jumbo_size = size * pages;
5275                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5276                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5277
5278                 bp->rx_pg_ring_size = jumbo_size;
5279                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5280                                                         MAX_RX_PG_RINGS);
5281                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5282                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5283                 bp->rx_copy_thresh = 0;
5284         }
5285
5286         bp->rx_buf_use_size = rx_size;
5287         /* hw alignment */
5288         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5289         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5290         bp->rx_ring_size = size;
5291         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5292         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5293 }
5294
/* Unmap and free every skb still held by the TX rings.  Used when the
 * device is being reset or shut down, so completed-but-unreaped and
 * still-pending packets alike are discarded.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the body: by 1 for an empty slot, or by
		 * 1 + nr_frags when an skb (head + fragments) is freed.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Unmap the linear part of the skb. */
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Then unmap each page fragment; TX_RING_IDX wraps j
			 * in case the frags straddle the end of the ring.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					pci_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5338
/* Unmap and free every skb and page still posted to the RX rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this bails out of the whole function on the
		 * first ring without a buffer ring, while bnx2_free_tx_skbs()
		 * uses continue; presumably rings are allocated in order so
		 * later rings are also unallocated — confirm before changing.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release any pages posted to the jumbo page ring. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5372
/* Free all buffers held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5379
5380 static int
5381 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5382 {
5383         int rc;
5384
5385         rc = bnx2_reset_chip(bp, reset_code);
5386         bnx2_free_skbs(bp);
5387         if (rc)
5388                 return rc;
5389
5390         if ((rc = bnx2_init_chip(bp)) != 0)
5391                 return rc;
5392
5393         bnx2_init_all_rings(bp);
5394         return 0;
5395 }
5396
5397 static int
5398 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5399 {
5400         int rc;
5401
5402         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5403                 return rc;
5404
5405         spin_lock_bh(&bp->phy_lock);
5406         bnx2_init_phy(bp, reset_phy);
5407         bnx2_set_link(bp);
5408         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5409                 bnx2_remote_phy_event(bp);
5410         spin_unlock_bh(&bp->phy_lock);
5411         return 0;
5412 }
5413
5414 static int
5415 bnx2_shutdown_chip(struct bnx2 *bp)
5416 {
5417         u32 reset_code;
5418
5419         if (bp->flags & BNX2_FLAG_NO_WOL)
5420                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5421         else if (bp->wol)
5422                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5423         else
5424                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5425
5426         return bnx2_reset_chip(bp, reset_code);
5427 }
5428
/* Ethtool self-test: for each register in the table, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original value
 * is restored after each register.  Returns 0 or -ENODEV on mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset 0xffff terminates the table; BNX2_FL_NOT_5709 entries are
	 * skipped on 5709 chips.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits must
		 * keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits must
		 * still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5599
5600 static int
5601 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5602 {
5603         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5604                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5605         int i;
5606
5607         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5608                 u32 offset;
5609
5610                 for (offset = 0; offset < size; offset += 4) {
5611
5612                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5613
5614                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5615                                 test_pattern[i]) {
5616                                 return -ENODEV;
5617                         }
5618                 }
5619         }
5620         return 0;
5621 }
5622
5623 static int
5624 bnx2_test_memory(struct bnx2 *bp)
5625 {
5626         int ret = 0;
5627         int i;
5628         static struct mem_entry {
5629                 u32   offset;
5630                 u32   len;
5631         } mem_tbl_5706[] = {
5632                 { 0x60000,  0x4000 },
5633                 { 0xa0000,  0x3000 },
5634                 { 0xe0000,  0x4000 },
5635                 { 0x120000, 0x4000 },
5636                 { 0x1a0000, 0x4000 },
5637                 { 0x160000, 0x4000 },
5638                 { 0xffffffff, 0    },
5639         },
5640         mem_tbl_5709[] = {
5641                 { 0x60000,  0x4000 },
5642                 { 0xa0000,  0x3000 },
5643                 { 0xe0000,  0x4000 },
5644                 { 0x120000, 0x4000 },
5645                 { 0x1a0000, 0x4000 },
5646                 { 0xffffffff, 0    },
5647         };
5648         struct mem_entry *mem_tbl;
5649
5650         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5651                 mem_tbl = mem_tbl_5709;
5652         else
5653                 mem_tbl = mem_tbl_5706;
5654
5655         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5656                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5657                         mem_tbl[i].len)) != 0) {
5658                         return ret;
5659                 }
5660         }
5661
5662         return ret;
5663 }
5664
5665 #define BNX2_MAC_LOOPBACK       0
5666 #define BNX2_PHY_LOOPBACK       1
5667
/* Loopback self-test: transmit one self-addressed test packet with the MAC
 * or PHY configured in loopback mode and verify it is received back intact.
 * Returns 0 on success, -ENODEV on any mismatch, -EINVAL/-ENOMEM/-EIO on
 * setup failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Both directions of the test go through vector 0's rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Remote-PHY setups cannot do PHY loopback; report success. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our own MAC as destination, then a counting
	 * byte pattern that is verified on receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand the packet to the hardware via one TX descriptor. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the packet back, then force another
	 * status block update so the new consumer indices are visible.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts packets received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by hardware precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4-byte CRC must match. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5799
5800 #define BNX2_MAC_LOOPBACK_FAILED        1
5801 #define BNX2_PHY_LOOPBACK_FAILED        2
5802 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5803                                          BNX2_PHY_LOOPBACK_FAILED)
5804
5805 static int
5806 bnx2_test_loopback(struct bnx2 *bp)
5807 {
5808         int rc = 0;
5809
5810         if (!netif_running(bp->dev))
5811                 return BNX2_LOOPBACK_FAILED;
5812
5813         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5814         spin_lock_bh(&bp->phy_lock);
5815         bnx2_init_phy(bp, 1);
5816         spin_unlock_bh(&bp->phy_lock);
5817         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5818                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5819         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5820                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5821         return rc;
5822 }
5823
5824 #define NVRAM_SIZE 0x200
5825 #define CRC32_RESIDUAL 0xdebb20e3
5826
5827 static int
5828 bnx2_test_nvram(struct bnx2 *bp)
5829 {
5830         __be32 buf[NVRAM_SIZE / 4];
5831         u8 *data = (u8 *) buf;
5832         int rc = 0;
5833         u32 magic, csum;
5834
5835         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5836                 goto test_nvram_done;
5837
5838         magic = be32_to_cpu(buf[0]);
5839         if (magic != 0x669955aa) {
5840                 rc = -ENODEV;
5841                 goto test_nvram_done;
5842         }
5843
5844         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5845                 goto test_nvram_done;
5846
5847         csum = ether_crc_le(0x100, data);
5848         if (csum != CRC32_RESIDUAL) {
5849                 rc = -ENODEV;
5850                 goto test_nvram_done;
5851         }
5852
5853         csum = ether_crc_le(0x100, data + 0x100);
5854         if (csum != CRC32_RESIDUAL) {
5855                 rc = -ENODEV;
5856         }
5857
5858 test_nvram_done:
5859         return rc;
5860 }
5861
5862 static int
5863 bnx2_test_link(struct bnx2 *bp)
5864 {
5865         u32 bmsr;
5866
5867         if (!netif_running(bp->dev))
5868                 return -ENODEV;
5869
5870         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5871                 if (bp->link_up)
5872                         return 0;
5873                 return -ENODEV;
5874         }
5875         spin_lock_bh(&bp->phy_lock);
5876         bnx2_enable_bmsr1(bp);
5877         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5878         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5879         bnx2_disable_bmsr1(bp);
5880         spin_unlock_bh(&bp->phy_lock);
5881
5882         if (bmsr & BMSR_LSTATUS) {
5883                 return 0;
5884         }
5885         return -ENODEV;
5886 }
5887
5888 static int
5889 bnx2_test_intr(struct bnx2 *bp)
5890 {
5891         int i;
5892         u16 status_idx;
5893
5894         if (!netif_running(bp->dev))
5895                 return -ENODEV;
5896
5897         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5898
5899         /* This register is not touched during run-time. */
5900         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5901         REG_RD(bp, BNX2_HC_COMMAND);
5902
5903         for (i = 0; i < 10; i++) {
5904                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5905                         status_idx) {
5906
5907                         break;
5908                 }
5909
5910                 msleep_interruptible(10);
5911         }
5912         if (i < 10)
5913                 return 0;
5914
5915         return -ENODEV;
5916 }
5917
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* Select the MODE_CTL shadow register and require signal detect. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Double read: discard the first (presumably latched) AN_DBG
         * value so the check below sees current status.
         */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        /* No sync or invalid RUDI means the partner is not usable. */
        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        /* Same double-read pattern on the DSP expansion register. */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5949
/* Per-tick SerDes link handling for the 5706, called from bnx2_timer
 * with no locks held (takes phy_lock itself).  Implements parallel
 * detection when autoneg fails and loss-of-sync recovery.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* Autoneg countdown still running; skip link checks. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg hasn't produced a link; if the partner
                         * shows a usable signal, force 1G full duplex
                         * (parallel detection).
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link is up via parallel detect; poll shadow reg 0x15
                 * (selected through reg 0x17).  If bit 0x20 is set the
                 * partner presumably supports autoneg again — switch
                 * back to autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read of AN_DBG — first value presumably latched,
                 * matching the pattern in bnx2_5706_serdes_has_link().
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link claims up but PHY lost sync: force it down
                         * once, then let bnx2_set_link() resolve the state
                         * on the following tick.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
6011
/* Per-tick SerDes handling for the 5708, called from bnx2_timer.
 * While the link is down, alternates between forced 2.5G mode and
 * autonegotiation.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* With a remote PHY the link is managed elsewhere. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg hasn't linked; try forcing 2.5G and
                         * poll again after the shorter forced timeout.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced mode didn't link either; go back to
                         * autoneg and hold off for two timer ticks.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
6044
/* Periodic maintenance timer (re-armed every bp->current_interval):
 * MSI lost-interrupt check, firmware heartbeat, firmware RX-drop
 * counter refresh, broken-stats workaround, and SerDes polling.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* intr_sem != 0 means interrupts are being held off (e.g. during
         * reset); just re-arm the timer and try again later.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Only plain (non one-shot) MSI needs the missed-interrupt
         * check; the flag test excludes one-shot MSI and MSI-X.
         */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Pull the firmware RX drop count into the stats block. */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6080
6081 static int
6082 bnx2_request_irq(struct bnx2 *bp)
6083 {
6084         unsigned long flags;
6085         struct bnx2_irq *irq;
6086         int rc = 0, i;
6087
6088         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6089                 flags = 0;
6090         else
6091                 flags = IRQF_SHARED;
6092
6093         for (i = 0; i < bp->irq_nvecs; i++) {
6094                 irq = &bp->irq_tbl[i];
6095                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6096                                  &bp->bnx2_napi[i]);
6097                 if (rc)
6098                         break;
6099                 irq->requested = 1;
6100         }
6101         return rc;
6102 }
6103
6104 static void
6105 bnx2_free_irq(struct bnx2 *bp)
6106 {
6107         struct bnx2_irq *irq;
6108         int i;
6109
6110         for (i = 0; i < bp->irq_nvecs; i++) {
6111                 irq = &bp->irq_tbl[i];
6112                 if (irq->requested)
6113                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6114                 irq->requested = 0;
6115         }
6116         if (bp->flags & BNX2_FLAG_USING_MSI)
6117                 pci_disable_msi(bp->pdev);
6118         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6119                 pci_disable_msix(bp->pdev);
6120
6121         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6122 }
6123
6124 static void
6125 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6126 {
6127         int i, rc;
6128         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6129         struct net_device *dev = bp->dev;
6130         const int len = sizeof(bp->irq_tbl[0].name);
6131
6132         bnx2_setup_msix_tbl(bp);
6133         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6134         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6135         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6136
6137         /*  Need to flush the previous three writes to ensure MSI-X
6138          *  is setup properly */
6139         REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6140
6141         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6142                 msix_ent[i].entry = i;
6143                 msix_ent[i].vector = 0;
6144         }
6145
6146         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6147         if (rc != 0)
6148                 return;
6149
6150         bp->irq_nvecs = msix_vecs;
6151         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6152         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6153                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6154                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6155                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6156         }
6157 }
6158
/* Choose the interrupt mode (INTx, MSI, or MSI-X) and populate
 * bp->irq_tbl accordingly.  @dis_msi forces legacy INTx.  Also derives
 * the TX/RX ring counts from the number of vectors obtained.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* Default: a single legacy INTx vector. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        /* Multi-vector MSI-X only pays off with more than one CPU. */
        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to single-vector MSI if MSI-X was not enabled. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 gets the one-shot MSI handler. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* TX queue count is kept a power of two. */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
6192
/* Called with rtnl_lock */
/* ndo_open: power up, pick an interrupt mode, allocate rings, init the
 * chip, verify MSI actually fires (falling back to INTx if not), then
 * start the TX queues.  Returns 0 or a negative errno; on error all
 * acquired resources are released.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        /* Interrupt mode must be chosen before NAPI/ring allocation so
         * the per-vector structures match the vector count.
         */
        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

                        /* Redo the whole interrupt setup and chip init
                         * in forced-INTx mode (dis_msi = 1).
                         */
                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                netdev_info(dev, "using MSI\n");
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                netdev_info(dev, "using MSIX\n");

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind; bnx2_free_irq() also disables MSI/MSI-X. */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
6268
6269 static void
6270 bnx2_reset_task(struct work_struct *work)
6271 {
6272         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6273
6274         rtnl_lock();
6275         if (!netif_running(bp->dev)) {
6276                 rtnl_unlock();
6277                 return;
6278         }
6279
6280         bnx2_netif_stop(bp);
6281
6282         bnx2_init_nic(bp, 1);
6283
6284         atomic_set(&bp->intr_sem, 1);
6285         bnx2_netif_start(bp);
6286         rtnl_unlock();
6287 }
6288
/* Dump a snapshot of key hardware and firmware state registers to the
 * log; used by the TX timeout handler for postmortem debugging.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
        struct net_device *dev = bp->dev;

        netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
        netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
                   REG_RD(bp, BNX2_EMAC_TX_STATUS),
                   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
        netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
                   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
                   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
        netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
                   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
        /* The MSI-X pending-bit array is only meaningful in MSI-X mode. */
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                netdev_err(dev, "DEBUG: PBA[%08x]\n",
                           REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6307
/* netdev watchdog callback: log the hardware state, then hand the
 * actual reset to the workqueue (bnx2_reset_task).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_dump_state(bp);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
6318
6319 #ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install a new VLAN group.  The device is quiesced around the pointer
 * swap; if it is not running, only the pointer is updated.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (netif_running(dev))
                bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;

        if (!netif_running(dev))
                return;

        /* Re-program RX filtering and notify the firmware of the VLAN
         * update (KEEP_VLAN_UPDATE) when the chip keeps VLAN tags.
         */
        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
6340 #endif
6341
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * ndo_start_xmit: map the skb (head + page fragments) into TX buffer
 * descriptors on the ring selected by the skb's queue mapping, fill in
 * checksum/VLAN/LSO flags, and ring the doorbell.  On a DMA mapping
 * failure the skb is unwound and dropped (NETDEV_TX_OK is still
 * returned, as required for dropped packets).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Need one descriptor per fragment plus one for the head. */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        /* VLAN tag rides in the upper 16 bits of the flags word. */
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        /* LSO: encode MSS, TCP option length, and (for IPv6) the TCP
         * header offset into the descriptor flag/mss fields.
         */
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* The 8-byte-unit offset is scattered
                                 * across several descriptor bit fields.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map the linear head first; on failure just drop the skb. */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One descriptor per page fragment, in order. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(bp->pdev, mapping))
                        goto dma_error;
                pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Last descriptor of the packet gets the END flag. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Doorbell: publish the new producer index and byte count. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        /* Stop the queue when the ring is nearly full; re-wake
         * immediately if bnx2_tx_int() freed space in the meantime.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
dma_error:
        /* save value of frag that failed */
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
                pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
6508
/* Called with rtnl_lock */
/* ndo_stop: cancel pending reset work, quiesce interrupts/NAPI/timer,
 * shut down the chip, release IRQs and memory, and drop to D3hot.
 * The teardown order mirrors the setup order in bnx2_open() reversed.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Low-power state until the next bnx2_open(). */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6529
6530 static void
6531 bnx2_save_stats(struct bnx2 *bp)
6532 {
6533         u32 *hw_stats = (u32 *) bp->stats_blk;
6534         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6535         int i;
6536
6537         /* The 1st 10 counters are 64-bit counters */
6538         for (i = 0; i < 20; i += 2) {
6539                 u32 hi;
6540                 u64 lo;
6541
6542                 hi = temp_stats[i] + hw_stats[i];
6543                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6544                 if (lo > 0xffffffff)
6545                         hi++;
6546                 temp_stats[i] = hi;
6547                 temp_stats[i + 1] = lo & 0xffffffff;
6548         }
6549
6550         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6551                 temp_stats[i] += hw_stats[i];
6552 }
6553
/* Combine a hi/lo 32-bit counter pair into one unsigned long.  Only
 * meaningful where unsigned long is 64 bits wide (see the
 * BITS_PER_LONG switch below).
 */
#define GET_64BIT_NET_STATS64(ctr)                              \
        (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
        (unsigned long) (ctr##_lo)

/* On 32-bit hosts only the low word of a 64-bit counter is reported. */
#define GET_64BIT_NET_STATS32(ctr)                              \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

/* Sum a 32-bit counter from the live and the saved stats blocks;
 * expects a local `bp` in the expansion context.
 */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6574
/* Collect device statistics into dev->stats by folding the live
 * hardware stats block together with the saved (temp) block.  Returns
 * &dev->stats unconditionally; if the stats block has not been
 * allocated yet the previous contents are returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct net_device_stats *net_stats = &dev->stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        /* rx_errors is the aggregate of the categories above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* NOTE(review): the carrier-sense counter is presumably
         * unreliable on 5706 and 5708 A0 parts; it is reported as 0.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Includes the firmware-maintained drop count refreshed by
         * bnx2_timer().
         */
        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6648
6649 /* All ethtool functions called with rtnl_lock */
6650
6651 static int
6652 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6653 {
6654         struct bnx2 *bp = netdev_priv(dev);
6655         int support_serdes = 0, support_copper = 0;
6656
6657         cmd->supported = SUPPORTED_Autoneg;
6658         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6659                 support_serdes = 1;
6660                 support_copper = 1;
6661         } else if (bp->phy_port == PORT_FIBRE)
6662                 support_serdes = 1;
6663         else
6664                 support_copper = 1;
6665
6666         if (support_serdes) {
6667                 cmd->supported |= SUPPORTED_1000baseT_Full |
6668                         SUPPORTED_FIBRE;
6669                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6670                         cmd->supported |= SUPPORTED_2500baseX_Full;
6671
6672         }
6673         if (support_copper) {
6674                 cmd->supported |= SUPPORTED_10baseT_Half |
6675                         SUPPORTED_10baseT_Full |
6676                         SUPPORTED_100baseT_Half |
6677                         SUPPORTED_100baseT_Full |
6678                         SUPPORTED_1000baseT_Full |
6679                         SUPPORTED_TP;
6680
6681         }
6682
6683         spin_lock_bh(&bp->phy_lock);
6684         cmd->port = bp->phy_port;
6685         cmd->advertising = bp->advertising;
6686
6687         if (bp->autoneg & AUTONEG_SPEED) {
6688                 cmd->autoneg = AUTONEG_ENABLE;
6689         }
6690         else {
6691                 cmd->autoneg = AUTONEG_DISABLE;
6692         }
6693
6694         if (netif_carrier_ok(dev)) {
6695                 cmd->speed = bp->line_speed;
6696                 cmd->duplex = bp->duplex;
6697         }
6698         else {
6699                 cmd->speed = -1;
6700                 cmd->duplex = -1;
6701         }
6702         spin_unlock_bh(&bp->phy_lock);
6703
6704         cmd->transceiver = XCVR_INTERNAL;
6705         cmd->phy_address = bp->phy_addr;
6706
6707         return 0;
6708 }
6709
/* ethtool_ops->set_settings: change speed/duplex/autoneg configuration.
 * Called with rtnl_lock held.  The request is validated first; only a
 * fully valid request is committed to the softc, and the PHY is
 * reprogrammed if the device is up.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on local copies so a rejected request leaves bp untouched. */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching to the other media type is only possible on devices
         * with remote-PHY capability.
         */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Restrict the advertised modes to what the chosen media
                 * supports; an empty mask means "advertise everything".
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed.  Fibre only supports 1G/2.5G full duplex
                 * (2.5G additionally requires hardware capability); copper
                 * cannot be forced to gigabit speeds here.
                 */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Request validated; commit the new settings. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6787
6788 static void
6789 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6790 {
6791         struct bnx2 *bp = netdev_priv(dev);
6792
6793         strcpy(info->driver, DRV_MODULE_NAME);
6794         strcpy(info->version, DRV_MODULE_VERSION);
6795         strcpy(info->bus_info, pci_name(bp->pdev));
6796         strcpy(info->fw_version, bp->fw_version);
6797 }
6798
/* The register dump covers the first 32 KB of device register space. */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool_ops->get_regs_len: size of the buffer bnx2_get_regs() fills. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6806
6807 static void
6808 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6809 {
6810         u32 *p = _p, i, offset;
6811         u8 *orig_p = _p;
6812         struct bnx2 *bp = netdev_priv(dev);
6813         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6814                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6815                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6816                                  0x1040, 0x1048, 0x1080, 0x10a4,
6817                                  0x1400, 0x1490, 0x1498, 0x14f0,
6818                                  0x1500, 0x155c, 0x1580, 0x15dc,
6819                                  0x1600, 0x1658, 0x1680, 0x16d8,
6820                                  0x1800, 0x1820, 0x1840, 0x1854,
6821                                  0x1880, 0x1894, 0x1900, 0x1984,
6822                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6823                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6824                                  0x2000, 0x2030, 0x23c0, 0x2400,
6825                                  0x2800, 0x2820, 0x2830, 0x2850,
6826                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6827                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6828                                  0x4080, 0x4090, 0x43c0, 0x4458,
6829                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6830                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6831                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6832                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6833                                  0x6800, 0x6848, 0x684c, 0x6860,
6834                                  0x6888, 0x6910, 0x8000 };
6835
6836         regs->version = 0;
6837
6838         memset(p, 0, BNX2_REGDUMP_LEN);
6839
6840         if (!netif_running(bp->dev))
6841                 return;
6842
6843         i = 0;
6844         offset = reg_boundaries[0];
6845         p += offset;
6846         while (offset < BNX2_REGDUMP_LEN) {
6847                 *p++ = REG_RD(bp, offset);
6848                 offset += 4;
6849                 if (offset == reg_boundaries[i + 1]) {
6850                         offset = reg_boundaries[i + 2];
6851                         p = (u32 *) (orig_p + offset);
6852                         i += 2;
6853                 }
6854         }
6855 }
6856
6857 static void
6858 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6859 {
6860         struct bnx2 *bp = netdev_priv(dev);
6861
6862         if (bp->flags & BNX2_FLAG_NO_WOL) {
6863                 wol->supported = 0;
6864                 wol->wolopts = 0;
6865         }
6866         else {
6867                 wol->supported = WAKE_MAGIC;
6868                 if (bp->wol)
6869                         wol->wolopts = WAKE_MAGIC;
6870                 else
6871                         wol->wolopts = 0;
6872         }
6873         memset(&wol->sopass, 0, sizeof(wol->sopass));
6874 }
6875
6876 static int
6877 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6878 {
6879         struct bnx2 *bp = netdev_priv(dev);
6880
6881         if (wol->wolopts & ~WAKE_MAGIC)
6882                 return -EINVAL;
6883
6884         if (wol->wolopts & WAKE_MAGIC) {
6885                 if (bp->flags & BNX2_FLAG_NO_WOL)
6886                         return -EINVAL;
6887
6888                 bp->wol = 1;
6889         }
6890         else {
6891                 bp->wol = 0;
6892         }
6893         return 0;
6894 }
6895
/* ethtool_ops->nway_reset: restart link autonegotiation.
 * Returns 0 on success, -EAGAIN if the device is down, -EINVAL if
 * autonegotiation is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        /* Renegotiation only makes sense with autoneg enabled. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                /* PHY is controlled by the bootcode: ask it to redo the
                 * link setup rather than touching the PHY directly.
                 */
                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping to let the partner see
                 * the loopback-induced link drop.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handling in the timer. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and restart autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6941
6942 static u32
6943 bnx2_get_link(struct net_device *dev)
6944 {
6945         struct bnx2 *bp = netdev_priv(dev);
6946
6947         return bp->link_up;
6948 }
6949
6950 static int
6951 bnx2_get_eeprom_len(struct net_device *dev)
6952 {
6953         struct bnx2 *bp = netdev_priv(dev);
6954
6955         if (bp->flash_info == NULL)
6956                 return 0;
6957
6958         return (int) bp->flash_size;
6959 }
6960
6961 static int
6962 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6963                 u8 *eebuf)
6964 {
6965         struct bnx2 *bp = netdev_priv(dev);
6966         int rc;
6967
6968         if (!netif_running(dev))
6969                 return -EAGAIN;
6970
6971         /* parameters already validated in ethtool_get_eeprom */
6972
6973         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6974
6975         return rc;
6976 }
6977
6978 static int
6979 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6980                 u8 *eebuf)
6981 {
6982         struct bnx2 *bp = netdev_priv(dev);
6983         int rc;
6984
6985         if (!netif_running(dev))
6986                 return -EAGAIN;
6987
6988         /* parameters already validated in ethtool_set_eeprom */
6989
6990         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6991
6992         return rc;
6993 }
6994
6995 static int
6996 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6997 {
6998         struct bnx2 *bp = netdev_priv(dev);
6999
7000         memset(coal, 0, sizeof(struct ethtool_coalesce));
7001
7002         coal->rx_coalesce_usecs = bp->rx_ticks;
7003         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7004         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7005         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7006
7007         coal->tx_coalesce_usecs = bp->tx_ticks;
7008         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7009         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7010         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7011
7012         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7013
7014         return 0;
7015 }
7016
7017 static int
7018 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7019 {
7020         struct bnx2 *bp = netdev_priv(dev);
7021
7022         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7023         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7024
7025         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7026         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7027
7028         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7029         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7030
7031         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7032         if (bp->rx_quick_cons_trip_int > 0xff)
7033                 bp->rx_quick_cons_trip_int = 0xff;
7034
7035         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7036         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7037
7038         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7039         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7040
7041         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7042         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7043
7044         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7045         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7046                 0xff;
7047
7048         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7049         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7050                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7051                         bp->stats_ticks = USEC_PER_SEC;
7052         }
7053         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7054                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7055         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7056
7057         if (netif_running(bp->dev)) {
7058                 bnx2_netif_stop(bp);
7059                 bnx2_init_nic(bp, 0);
7060                 bnx2_netif_start(bp);
7061         }
7062
7063         return 0;
7064 }
7065
7066 static void
7067 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7068 {
7069         struct bnx2 *bp = netdev_priv(dev);
7070
7071         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7072         ering->rx_mini_max_pending = 0;
7073         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7074
7075         ering->rx_pending = bp->rx_ring_size;
7076         ering->rx_mini_pending = 0;
7077         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7078
7079         ering->tx_max_pending = MAX_TX_DESC_CNT;
7080         ering->tx_pending = bp->tx_ring_size;
7081 }
7082
/* Resize the RX and TX rings.  If the interface is up, the chip is reset,
 * ring memory is reallocated for the new sizes, and the chip is brought
 * back up.  Returns 0 or a negative errno; on allocation/init failure the
 * device is closed.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                if (rc) {
                        /* NOTE(review): NAPI is re-enabled before
                         * dev_close() — presumably so close can tear it
                         * down cleanly; confirm against bnx2_netif_stop().
                         */
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp);
        }
        return 0;
}
7122
7123 static int
7124 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7125 {
7126         struct bnx2 *bp = netdev_priv(dev);
7127         int rc;
7128
7129         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7130                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7131                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7132
7133                 return -EINVAL;
7134         }
7135         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7136         return rc;
7137 }
7138
7139 static void
7140 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7141 {
7142         struct bnx2 *bp = netdev_priv(dev);
7143
7144         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7145         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7146         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7147 }
7148
7149 static int
7150 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7151 {
7152         struct bnx2 *bp = netdev_priv(dev);
7153
7154         bp->req_flow_ctrl = 0;
7155         if (epause->rx_pause)
7156                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7157         if (epause->tx_pause)
7158                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7159
7160         if (epause->autoneg) {
7161                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7162         }
7163         else {
7164                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7165         }
7166
7167         if (netif_running(dev)) {
7168                 spin_lock_bh(&bp->phy_lock);
7169                 bnx2_setup_phy(bp, bp->phy_port);
7170                 spin_unlock_bh(&bp->phy_lock);
7171         }
7172
7173         return 0;
7174 }
7175
7176 static u32
7177 bnx2_get_rx_csum(struct net_device *dev)
7178 {
7179         struct bnx2 *bp = netdev_priv(dev);
7180
7181         return bp->rx_csum;
7182 }
7183
7184 static int
7185 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7186 {
7187         struct bnx2 *bp = netdev_priv(dev);
7188
7189         bp->rx_csum = data;
7190         return 0;
7191 }
7192
7193 static int
7194 bnx2_set_tso(struct net_device *dev, u32 data)
7195 {
7196         struct bnx2 *bp = netdev_priv(dev);
7197
7198         if (data) {
7199                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7200                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7201                         dev->features |= NETIF_F_TSO6;
7202         } else
7203                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7204                                    NETIF_F_TSO_ECN);
7205         return 0;
7206 }
7207
/* Statistic names reported by "ethtool -S".  The order must stay in sync
 * with bnx2_stats_offset_arr[] and the per-chip stats length tables below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7259
/* Number of ethtool statistics, derived from the name table. */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
                        sizeof(bnx2_stats_str_arr[0]))

/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each ethtool statistic in the hardware statistics block;
 * entries parallel bnx2_stats_str_arr[].  For 64-bit counters the offset
 * names the high (_hi) word, with the low word immediately following.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7314
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-statistic counter width in bytes (8 = hi/lo pair, 4 = single word,
 * 0 = not valid on this chip); entries parallel bnx2_stats_str_arr[].
 * This table is used for the 5706 A0-A2 and 5708 A0 chips.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};

/* Same as above for all later chips; no counters are skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7333
/* Number of self tests; must match bnx2_tests_str_arr[] below. */
#define BNX2_NUM_TESTS 6

/* Self-test names for ethtool; the index of each entry matches the buf[]
 * slot filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7346
7347 static int
7348 bnx2_get_sset_count(struct net_device *dev, int sset)
7349 {
7350         switch (sset) {
7351         case ETH_SS_TEST:
7352                 return BNX2_NUM_TESTS;
7353         case ETH_SS_STATS:
7354                 return BNX2_NUM_STATS;
7355         default:
7356                 return -EOPNOTSUPP;
7357         }
7358 }
7359
/* ethtool_ops->self_test: run diagnostics.  Results go into buf[] in the
 * order of bnx2_tests_str_arr[] (0 = pass, non-zero = fail).  Offline
 * tests reset the chip and disrupt traffic.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure the chip is powered up even if the device is down. */
        bnx2_set_power_state(bp, PCI_D0);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Offline tests need exclusive access to the hardware. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation, or shut the chip back down if
                 * the device was not up to begin with.
                 */
                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
        /* Power back down if the device is not in use. */
        if (!netif_running(bp->dev))
                bnx2_set_power_state(bp, PCI_D3hot);
}
7418
7419 static void
7420 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7421 {
7422         switch (stringset) {
7423         case ETH_SS_STATS:
7424                 memcpy(buf, bnx2_stats_str_arr,
7425                         sizeof(bnx2_stats_str_arr));
7426                 break;
7427         case ETH_SS_TEST:
7428                 memcpy(buf, bnx2_tests_str_arr,
7429                         sizeof(bnx2_tests_str_arr));
7430                 break;
7431         }
7432 }
7433
7434 static void
7435 bnx2_get_ethtool_stats(struct net_device *dev,
7436                 struct ethtool_stats *stats, u64 *buf)
7437 {
7438         struct bnx2 *bp = netdev_priv(dev);
7439         int i;
7440         u32 *hw_stats = (u32 *) bp->stats_blk;
7441         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7442         u8 *stats_len_arr = NULL;
7443
7444         if (hw_stats == NULL) {
7445                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7446                 return;
7447         }
7448
7449         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7450             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7451             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7452             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7453                 stats_len_arr = bnx2_5706_stats_len_arr;
7454         else
7455                 stats_len_arr = bnx2_5708_stats_len_arr;
7456
7457         for (i = 0; i < BNX2_NUM_STATS; i++) {
7458                 unsigned long offset;
7459
7460                 if (stats_len_arr[i] == 0) {
7461                         /* skip this counter */
7462                         buf[i] = 0;
7463                         continue;
7464                 }
7465
7466                 offset = bnx2_stats_offset_arr[i];
7467                 if (stats_len_arr[i] == 4) {
7468                         /* 4-byte counter */
7469                         buf[i] = (u64) *(hw_stats + offset) +
7470                                  *(temp_stats + offset);
7471                         continue;
7472                 }
7473                 /* 8-byte counter */
7474                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7475                          *(hw_stats + offset + 1) +
7476                          (((u64) *(temp_stats + offset)) << 32) +
7477                          *(temp_stats + offset + 1);
7478         }
7479 }
7480
7481 static int
7482 bnx2_phys_id(struct net_device *dev, u32 data)
7483 {
7484         struct bnx2 *bp = netdev_priv(dev);
7485         int i;
7486         u32 save;
7487
7488         bnx2_set_power_state(bp, PCI_D0);
7489
7490         if (data == 0)
7491                 data = 2;
7492
7493         save = REG_RD(bp, BNX2_MISC_CFG);
7494         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7495
7496         for (i = 0; i < (data * 2); i++) {
7497                 if ((i % 2) == 0) {
7498                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7499                 }
7500                 else {
7501                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7502                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7503                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7504                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7505                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7506                                 BNX2_EMAC_LED_TRAFFIC);
7507                 }
7508                 msleep_interruptible(500);
7509                 if (signal_pending(current))
7510                         break;
7511         }
7512         REG_WR(bp, BNX2_EMAC_LED, 0);
7513         REG_WR(bp, BNX2_MISC_CFG, save);
7514
7515         if (!netif_running(dev))
7516                 bnx2_set_power_state(bp, PCI_D3hot);
7517
7518         return 0;
7519 }
7520
7521 static int
7522 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7523 {
7524         struct bnx2 *bp = netdev_priv(dev);
7525
7526         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7527                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7528         else
7529                 return (ethtool_op_set_tx_csum(dev, data));
7530 }
7531
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7562
7563 /* Called with rtnl_lock */
7564 static int
7565 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7566 {
7567         struct mii_ioctl_data *data = if_mii(ifr);
7568         struct bnx2 *bp = netdev_priv(dev);
7569         int err;
7570
7571         switch(cmd) {
7572         case SIOCGMIIPHY:
7573                 data->phy_id = bp->phy_addr;
7574
7575                 /* fallthru */
7576         case SIOCGMIIREG: {
7577                 u32 mii_regval;
7578
7579                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7580                         return -EOPNOTSUPP;
7581
7582                 if (!netif_running(dev))
7583                         return -EAGAIN;
7584
7585                 spin_lock_bh(&bp->phy_lock);
7586                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7587                 spin_unlock_bh(&bp->phy_lock);
7588
7589                 data->val_out = mii_regval;
7590
7591                 return err;
7592         }
7593
7594         case SIOCSMIIREG:
7595                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7596                         return -EOPNOTSUPP;
7597
7598                 if (!netif_running(dev))
7599                         return -EAGAIN;
7600
7601                 spin_lock_bh(&bp->phy_lock);
7602                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7603                 spin_unlock_bh(&bp->phy_lock);
7604
7605                 return err;
7606
7607         default:
7608                 /* do nothing */
7609                 break;
7610         }
7611         return -EOPNOTSUPP;
7612 }
7613
7614 /* Called with rtnl_lock */
7615 static int
7616 bnx2_change_mac_addr(struct net_device *dev, void *p)
7617 {
7618         struct sockaddr *addr = p;
7619         struct bnx2 *bp = netdev_priv(dev);
7620
7621         if (!is_valid_ether_addr(addr->sa_data))
7622                 return -EINVAL;
7623
7624         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7625         if (netif_running(dev))
7626                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7627
7628         return 0;
7629 }
7630
7631 /* Called with rtnl_lock */
7632 static int
7633 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7634 {
7635         struct bnx2 *bp = netdev_priv(dev);
7636
7637         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7638                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7639                 return -EINVAL;
7640
7641         dev->mtu = new_mtu;
7642         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7643 }
7644
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run every interrupt handler by hand with its
 * vector masked, so netconsole etc. can make progress without IRQs.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		struct bnx2_irq *irq = &bp->irq_tbl[vec];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[vec]);
		enable_irq(irq->vector);
	}
}
#endif
7661
7662 static void __devinit
7663 bnx2_get_5709_media(struct bnx2 *bp)
7664 {
7665         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7666         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7667         u32 strap;
7668
7669         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7670                 return;
7671         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7672                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7673                 return;
7674         }
7675
7676         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7677                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7678         else
7679                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7680
7681         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7682                 switch (strap) {
7683                 case 0x4:
7684                 case 0x5:
7685                 case 0x6:
7686                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7687                         return;
7688                 }
7689         } else {
7690                 switch (strap) {
7691                 case 0x1:
7692                 case 0x2:
7693                 case 0x4:
7694                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7695                         return;
7696                 }
7697         }
7698 }
7699
/* Detect whether the (non-PCIE) device sits on a PCI or PCI-X bus and
 * at what clock speed, recording the result in bp->flags and
 * bp->bus_speed_mhz for reporting and bus-specific workarounds.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock-speed field onto a nominal MHz. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz is distinguishable. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7751
/* Read the VPD block from NVRAM and, when the board's manufacturer ID
 * is "1028" (Dell), append the vendor firmware version (keyword V0) to
 * bp->fw_version.  Failures are silent: the version string is cosmetic.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* The upper half of the buffer holds the raw NVRAM image; the
	 * lower half receives the byte-swapped copy parsed below.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD one big-endian dword at a time; reverse
	 * each 4-byte group into the byte order the PCI VPD helpers expect.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	/* Only boards whose manufacturer ID is "1028" carry the vendor
	 * version string we are after.
	 */
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Trailing space separates this from the bootcode version that
	 * bnx2_init_board() appends next.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7819
7820 static int __devinit
7821 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7822 {
7823         struct bnx2 *bp;
7824         unsigned long mem_len;
7825         int rc, i, j;
7826         u32 reg;
7827         u64 dma_mask, persist_dma_mask;
7828
7829         SET_NETDEV_DEV(dev, &pdev->dev);
7830         bp = netdev_priv(dev);
7831
7832         bp->flags = 0;
7833         bp->phy_flags = 0;
7834
7835         bp->temp_stats_blk =
7836                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7837
7838         if (bp->temp_stats_blk == NULL) {
7839                 rc = -ENOMEM;
7840                 goto err_out;
7841         }
7842
7843         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7844         rc = pci_enable_device(pdev);
7845         if (rc) {
7846                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7847                 goto err_out;
7848         }
7849
7850         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7851                 dev_err(&pdev->dev,
7852                         "Cannot find PCI device base address, aborting\n");
7853                 rc = -ENODEV;
7854                 goto err_out_disable;
7855         }
7856
7857         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7858         if (rc) {
7859                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7860                 goto err_out_disable;
7861         }
7862
7863         pci_set_master(pdev);
7864         pci_save_state(pdev);
7865
7866         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7867         if (bp->pm_cap == 0) {
7868                 dev_err(&pdev->dev,
7869                         "Cannot find power management capability, aborting\n");
7870                 rc = -EIO;
7871                 goto err_out_release;
7872         }
7873
7874         bp->dev = dev;
7875         bp->pdev = pdev;
7876
7877         spin_lock_init(&bp->phy_lock);
7878         spin_lock_init(&bp->indirect_lock);
7879 #ifdef BCM_CNIC
7880         mutex_init(&bp->cnic_lock);
7881 #endif
7882         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7883
7884         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7885         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7886         dev->mem_end = dev->mem_start + mem_len;
7887         dev->irq = pdev->irq;
7888
7889         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7890
7891         if (!bp->regview) {
7892                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7893                 rc = -ENOMEM;
7894                 goto err_out_release;
7895         }
7896
7897         /* Configure byte swap and enable write to the reg_window registers.
7898          * Rely on CPU to do target byte swapping on big endian systems
7899          * The chip's target access swapping will not swap all accesses
7900          */
7901         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7902                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7903                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7904
7905         bnx2_set_power_state(bp, PCI_D0);
7906
7907         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7908
7909         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7910                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7911                         dev_err(&pdev->dev,
7912                                 "Cannot find PCIE capability, aborting\n");
7913                         rc = -EIO;
7914                         goto err_out_unmap;
7915                 }
7916                 bp->flags |= BNX2_FLAG_PCIE;
7917                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7918                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7919         } else {
7920                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7921                 if (bp->pcix_cap == 0) {
7922                         dev_err(&pdev->dev,
7923                                 "Cannot find PCIX capability, aborting\n");
7924                         rc = -EIO;
7925                         goto err_out_unmap;
7926                 }
7927                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7928         }
7929
7930         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7931                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7932                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7933         }
7934
7935         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7936                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7937                         bp->flags |= BNX2_FLAG_MSI_CAP;
7938         }
7939
7940         /* 5708 cannot support DMA addresses > 40-bit.  */
7941         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7942                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7943         else
7944                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7945
7946         /* Configure DMA attributes. */
7947         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7948                 dev->features |= NETIF_F_HIGHDMA;
7949                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7950                 if (rc) {
7951                         dev_err(&pdev->dev,
7952                                 "pci_set_consistent_dma_mask failed, aborting\n");
7953                         goto err_out_unmap;
7954                 }
7955         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7956                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7957                 goto err_out_unmap;
7958         }
7959
7960         if (!(bp->flags & BNX2_FLAG_PCIE))
7961                 bnx2_get_pci_speed(bp);
7962
7963         /* 5706A0 may falsely detect SERR and PERR. */
7964         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7965                 reg = REG_RD(bp, PCI_COMMAND);
7966                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7967                 REG_WR(bp, PCI_COMMAND, reg);
7968         }
7969         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7970                 !(bp->flags & BNX2_FLAG_PCIX)) {
7971
7972                 dev_err(&pdev->dev,
7973                         "5706 A1 can only be used in a PCIX bus, aborting\n");
7974                 goto err_out_unmap;
7975         }
7976
7977         bnx2_init_nvram(bp);
7978
7979         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7980
7981         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7982             BNX2_SHM_HDR_SIGNATURE_SIG) {
7983                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7984
7985                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7986         } else
7987                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7988
7989         /* Get the permanent MAC address.  First we need to make sure the
7990          * firmware is actually running.
7991          */
7992         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7993
7994         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7995             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7996                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7997                 rc = -ENODEV;
7998                 goto err_out_unmap;
7999         }
8000
8001         bnx2_read_vpd_fw_ver(bp);
8002
8003         j = strlen(bp->fw_version);
8004         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8005         for (i = 0; i < 3 && j < 24; i++) {
8006                 u8 num, k, skip0;
8007
8008                 if (i == 0) {
8009                         bp->fw_version[j++] = 'b';
8010                         bp->fw_version[j++] = 'c';
8011                         bp->fw_version[j++] = ' ';
8012                 }
8013                 num = (u8) (reg >> (24 - (i * 8)));
8014                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8015                         if (num >= k || !skip0 || k == 1) {
8016                                 bp->fw_version[j++] = (num / k) + '0';
8017                                 skip0 = 0;
8018                         }
8019                 }
8020                 if (i != 2)
8021                         bp->fw_version[j++] = '.';
8022         }
8023         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8024         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8025                 bp->wol = 1;
8026
8027         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8028                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8029
8030                 for (i = 0; i < 30; i++) {
8031                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8032                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8033                                 break;
8034                         msleep(10);
8035                 }
8036         }
8037         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8038         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8039         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8040             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8041                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8042
8043                 if (j < 32)
8044                         bp->fw_version[j++] = ' ';
8045                 for (i = 0; i < 3 && j < 28; i++) {
8046                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8047                         reg = swab32(reg);
8048                         memcpy(&bp->fw_version[j], &reg, 4);
8049                         j += 4;
8050                 }
8051         }
8052
8053         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8054         bp->mac_addr[0] = (u8) (reg >> 8);
8055         bp->mac_addr[1] = (u8) reg;
8056
8057         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8058         bp->mac_addr[2] = (u8) (reg >> 24);
8059         bp->mac_addr[3] = (u8) (reg >> 16);
8060         bp->mac_addr[4] = (u8) (reg >> 8);
8061         bp->mac_addr[5] = (u8) reg;
8062
8063         bp->tx_ring_size = MAX_TX_DESC_CNT;
8064         bnx2_set_rx_ring_size(bp, 255);
8065
8066         bp->rx_csum = 1;
8067
8068         bp->tx_quick_cons_trip_int = 2;
8069         bp->tx_quick_cons_trip = 20;
8070         bp->tx_ticks_int = 18;
8071         bp->tx_ticks = 80;
8072
8073         bp->rx_quick_cons_trip_int = 2;
8074         bp->rx_quick_cons_trip = 12;
8075         bp->rx_ticks_int = 18;
8076         bp->rx_ticks = 18;
8077
8078         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8079
8080         bp->current_interval = BNX2_TIMER_INTERVAL;
8081
8082         bp->phy_addr = 1;
8083
8084         /* Disable WOL support if we are running on a SERDES chip. */
8085         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8086                 bnx2_get_5709_media(bp);
8087         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8088                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8089
8090         bp->phy_port = PORT_TP;
8091         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8092                 bp->phy_port = PORT_FIBRE;
8093                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8094                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8095                         bp->flags |= BNX2_FLAG_NO_WOL;
8096                         bp->wol = 0;
8097                 }
8098                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8099                         /* Don't do parallel detect on this board because of
8100                          * some board problems.  The link will not go down
8101                          * if we do parallel detect.
8102                          */
8103                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8104                             pdev->subsystem_device == 0x310c)
8105                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8106                 } else {
8107                         bp->phy_addr = 2;
8108                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8109                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8110                 }
8111         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8112                    CHIP_NUM(bp) == CHIP_NUM_5708)
8113                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8114         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8115                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8116                   CHIP_REV(bp) == CHIP_REV_Bx))
8117                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8118
8119         bnx2_init_fw_cap(bp);
8120
8121         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8122             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8123             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8124             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8125                 bp->flags |= BNX2_FLAG_NO_WOL;
8126                 bp->wol = 0;
8127         }
8128
8129         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8130                 bp->tx_quick_cons_trip_int =
8131                         bp->tx_quick_cons_trip;
8132                 bp->tx_ticks_int = bp->tx_ticks;
8133                 bp->rx_quick_cons_trip_int =
8134                         bp->rx_quick_cons_trip;
8135                 bp->rx_ticks_int = bp->rx_ticks;
8136                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8137                 bp->com_ticks_int = bp->com_ticks;
8138                 bp->cmd_ticks_int = bp->cmd_ticks;
8139         }
8140
8141         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8142          *
8143          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8144          * with byte enables disabled on the unused 32-bit word.  This is legal
8145          * but causes problems on the AMD 8132 which will eventually stop
8146          * responding after a while.
8147          *
8148          * AMD believes this incompatibility is unique to the 5706, and
8149          * prefers to locally disable MSI rather than globally disabling it.
8150          */
8151         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8152                 struct pci_dev *amd_8132 = NULL;
8153
8154                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8155                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8156                                                   amd_8132))) {
8157
8158                         if (amd_8132->revision >= 0x10 &&
8159                             amd_8132->revision <= 0x13) {
8160                                 disable_msi = 1;
8161                                 pci_dev_put(amd_8132);
8162                                 break;
8163                         }
8164                 }
8165         }
8166
8167         bnx2_set_default_link(bp);
8168         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8169
8170         init_timer(&bp->timer);
8171         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8172         bp->timer.data = (unsigned long) bp;
8173         bp->timer.function = bnx2_timer;
8174
8175         return 0;
8176
8177 err_out_unmap:
8178         if (bp->regview) {
8179                 iounmap(bp->regview);
8180                 bp->regview = NULL;
8181         }
8182
8183 err_out_release:
8184         pci_release_regions(pdev);
8185
8186 err_out_disable:
8187         pci_disable_device(pdev);
8188         pci_set_drvdata(pdev, NULL);
8189
8190 err_out:
8191         return rc;
8192 }
8193
8194 static char * __devinit
8195 bnx2_bus_string(struct bnx2 *bp, char *str)
8196 {
8197         char *s = str;
8198
8199         if (bp->flags & BNX2_FLAG_PCIE) {
8200                 s += sprintf(s, "PCI Express");
8201         } else {
8202                 s += sprintf(s, "PCI");
8203                 if (bp->flags & BNX2_FLAG_PCIX)
8204                         s += sprintf(s, "-X");
8205                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8206                         s += sprintf(s, " 32-bit");
8207                 else
8208                         s += sprintf(s, " 64-bit");
8209                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8210         }
8211         return str;
8212 }
8213
8214 static void __devinit
8215 bnx2_init_napi(struct bnx2 *bp)
8216 {
8217         int i;
8218
8219         for (i = 0; i < bp->irq_nvecs; i++) {
8220                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8221                 int (*poll)(struct napi_struct *, int);
8222
8223                 if (i == 0)
8224                         poll = bnx2_poll;
8225                 else
8226                         poll = bnx2_poll_msix;
8227
8228                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8229                 bnapi->bp = bp;
8230         }
8231 }
8232
/* net_device entry points exported by this driver. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8251
/* Propagate feature flags to dev->vlan_features when VLAN support is
 * compiled in; a no-op otherwise.  (Specifier order fixed to the
 * conventional "static inline void".)
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8258
8259 static int __devinit
8260 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8261 {
8262         static int version_printed = 0;
8263         struct net_device *dev = NULL;
8264         struct bnx2 *bp;
8265         int rc;
8266         char str[40];
8267
8268         if (version_printed++ == 0)
8269                 pr_info("%s", version);
8270
8271         /* dev zeroed in init_etherdev */
8272         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8273
8274         if (!dev)
8275                 return -ENOMEM;
8276
8277         rc = bnx2_init_board(pdev, dev);
8278         if (rc < 0) {
8279                 free_netdev(dev);
8280                 return rc;
8281         }
8282
8283         dev->netdev_ops = &bnx2_netdev_ops;
8284         dev->watchdog_timeo = TX_TIMEOUT;
8285         dev->ethtool_ops = &bnx2_ethtool_ops;
8286
8287         bp = netdev_priv(dev);
8288
8289         pci_set_drvdata(pdev, dev);
8290
8291         rc = bnx2_request_firmware(bp);
8292         if (rc)
8293                 goto error;
8294
8295         memcpy(dev->dev_addr, bp->mac_addr, 6);
8296         memcpy(dev->perm_addr, bp->mac_addr, 6);
8297
8298         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8299         vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8300         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8301                 dev->features |= NETIF_F_IPV6_CSUM;
8302                 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8303         }
8304 #ifdef BCM_VLAN
8305         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8306 #endif
8307         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8308         vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8309         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8310                 dev->features |= NETIF_F_TSO6;
8311                 vlan_features_add(dev, NETIF_F_TSO6);
8312         }
8313         if ((rc = register_netdev(dev))) {
8314                 dev_err(&pdev->dev, "Cannot register net device\n");
8315                 goto error;
8316         }
8317
8318         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8319                     board_info[ent->driver_data].name,
8320                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8321                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8322                     bnx2_bus_string(bp, str),
8323                     dev->base_addr,
8324                     bp->pdev->irq, dev->dev_addr);
8325
8326         return 0;
8327
8328 error:
8329         if (bp->mips_firmware)
8330                 release_firmware(bp->mips_firmware);
8331         if (bp->rv2p_firmware)
8332                 release_firmware(bp->rv2p_firmware);
8333
8334         if (bp->regview)
8335                 iounmap(bp->regview);
8336         pci_release_regions(pdev);
8337         pci_disable_device(pdev);
8338         pci_set_drvdata(pdev, NULL);
8339         free_netdev(dev);
8340         return rc;
8341 }
8342
/* PCI remove callback: undo everything bnx2_init_one() set up, in the
 * reverse order it was acquired.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no queued reset task runs against a dying device. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8368
/* Legacy PM suspend callback.  Quiesces the chip and drops it into the
 * low-power state requested by @state.  Safe to call whether or not the
 * interface is up.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Let any queued reset work finish before stopping the device. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	/* RX/TX rings are re-populated in bnx2_resume() via bnx2_init_nic(). */
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8392
8393 static int
8394 bnx2_resume(struct pci_dev *pdev)
8395 {
8396         struct net_device *dev = pci_get_drvdata(pdev);
8397         struct bnx2 *bp = netdev_priv(dev);
8398
8399         pci_restore_state(pdev);
8400         if (!netif_running(dev))
8401                 return 0;
8402
8403         bnx2_set_power_state(bp, PCI_D0);
8404         netif_device_attach(dev);
8405         bnx2_init_nic(bp, 1);
8406         bnx2_netif_start(bp);
8407         return 0;
8408 }
8409
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns %PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * %PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* rtnl_lock serializes against ifup/ifdown and the other AER
	 * callbacks which also take it.
	 */
	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		/* Device is gone for good; no reset will help. */
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8444
8445 /**
8446  * bnx2_io_slot_reset - called after the pci bus has been reset.
8447  * @pdev: Pointer to PCI device
8448  *
8449  * Restart the card from scratch, as if from a cold-boot.
8450  */
8451 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8452 {
8453         struct net_device *dev = pci_get_drvdata(pdev);
8454         struct bnx2 *bp = netdev_priv(dev);
8455
8456         rtnl_lock();
8457         if (pci_enable_device(pdev)) {
8458                 dev_err(&pdev->dev,
8459                         "Cannot re-enable PCI device after reset\n");
8460                 rtnl_unlock();
8461                 return PCI_ERS_RESULT_DISCONNECT;
8462         }
8463         pci_set_master(pdev);
8464         pci_restore_state(pdev);
8465         pci_save_state(pdev);
8466
8467         if (netif_running(dev)) {
8468                 bnx2_set_power_state(bp, PCI_D0);
8469                 bnx2_init_nic(bp, 1);
8470         }
8471
8472         rtnl_unlock();
8473         return PCI_ERS_RESULT_RECOVERED;
8474 }
8475
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* The chip was re-initialized in bnx2_io_slot_reset(); here we
	 * only restart the queues/NAPI and re-attach the netdev that
	 * bnx2_io_error_detected() detached.
	 */
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
8495
/* PCI AER recovery callbacks: detect -> slot reset -> resume. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected = bnx2_io_error_detected,
	.slot_reset     = bnx2_io_slot_reset,
	.resume         = bnx2_io_resume,
};
8501
/* PCI driver glue; bnx2_pci_tbl lists the supported 5706/5708/5709
 * device IDs.  __devexit_p() compiles the remove hook out when hotplug
 * support is not configured.
 */
static struct pci_driver bnx2_pci_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = bnx2_pci_tbl,
	.probe          = bnx2_init_one,
	.remove         = __devexit_p(bnx2_remove_one),
	.suspend        = bnx2_suspend,
	.resume         = bnx2_resume,
	.err_handler    = &bnx2_err_handler,
};
8511
/* Module entry point: register the PCI driver; probe runs per device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8516
/* Module exit point: unregister the driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8521
/* Hook module load/unload to the driver (un)registration above. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);


