bnx2x: Using PCI_DEVICE macro
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.114-1"
#define DRV_MODULE_RELDATE      "2009/07/29"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
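
/*
 * Illustration (not driver code): PCI_VDEVICE() from <linux/pci.h> fills
 * in the vendor/device pair and wildcards the rest of the match, so each
 * table entry above is equivalent to the open-coded form
 *
 *        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
 *          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
 *
 * with the trailing board-type enum landing in .driver_data, where the
 * probe routine can read it back via ent->driver_data as an index into
 * board_info[].
 */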

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
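
/*
 * Illustration (added note, not driver code): PCICFG_GRC_ADDRESS and
 * PCICFG_GRC_DATA form an indirect window in PCI config space, so a read
 * is "write the GRC address, then read the data register":
 *
 *        val = bnx2x_reg_rd_ind(bp, addr);
 *
 * Re-writing PCICFG_VENDOR_ID_OFFSET afterwards parks the window on a
 * harmless offset so a stray config cycle cannot touch device memory.
 */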

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
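
/*
 * Illustration (added note, not driver code): REG_WR_DMAE() in bnx2x.h is
 * the usual caller of the routine above; it stages the data in the
 * slowpath write buffer and hands the mapping to the engine, roughly:
 *
 *        memcpy(bnx2x_sp(bp, wb_data), valp, len32 * 4);
 *        bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *                         offset, len32);
 *
 * Completion is detected by polling wb_comp for DMAE_COMP_VAL, as the
 * loop above does for up to 200 iterations before declaring a timeout.
 */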

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
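
/*
 * Summary (added note): the four loops above are identical scans of the
 * XSTORM/TSTORM/CSTORM/USTORM assert lists; rc counts every populated
 * entry, so a non-zero return means at least one firmware storm
 * processor hit an assert. (STROM_ASSERT_ARRAY_SIZE is the historical
 * spelling of the array-size macro in bnx2x.h.)
 */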

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}
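
/*
 * Note on the arithmetic above (added comment): "mark" appears to be an
 * MCP-side address in the 0x08xxxxxx range, so subtracting 0x08000000
 * converts it into an offset within MCP_REG_MCPR_SCRATCH. The dump is
 * printed in two passes (mark..0xF900, then 0xF108..mark) because the
 * firmware log in the scratchpad is a circular buffer that wraps at the
 * end of that window.
 */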

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
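
/*
 * Typical call sites (illustrative; the interrupt/NAPI paths do roughly
 * the following):
 *
 *        bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *                     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
 *        bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
 *                     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
 *
 * i.e. update both storm indices but re-enable the interrupt only on
 * the final ack.
 */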

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
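
/*
 * Added summary of the BD chain unwound above: a packet occupies nbd
 * descriptors: one start BD (mapped), one parse BD (no mapping), an
 * optional TSO split-header BD (no mapping) and one data BD per
 * fragment, each unmapped with pci_unmap_page(). NEXT_TX_IDX() skips
 * the "next page" BDs that link the ring pages together.
 */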

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
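
/*
 * Worked example (illustrative numbers): with prod = 100, cons = 50 and
 * NUM_TX_RINGS = 16, used = (100 - 50) + 16 = 66 and the function
 * returns tx_ring_size - 66. Always counting the NUM_TX_RINGS
 * "next page" link entries as used keeps the estimate conservative, so
 * the driver never hands out a link BD as a data BD.
 */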

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in each page:
           these are the indices that correspond to the "next" element,
           hence they will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
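
/*
 * Added note on the mask convention: each u64 in fp->sge_mask tracks 64
 * SGEs, where a set bit means "still outstanding" and a cleared bit
 * means "consumed" (the two "next page" slots per page are pre-cleared
 * above so they never keep a word non-zero). bnx2x_update_sge_prod()
 * clears the bits named in a CQE's SGL, then advances rx_sge_prod over
 * every fully-zero word and re-arms it with RX_SGE_MASK_ELEM_ONE_MASK.
 */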
1227
1228 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1229                             struct sk_buff *skb, u16 cons, u16 prod)
1230 {
1231         struct bnx2x *bp = fp->bp;
1232         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1233         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1234         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1235         dma_addr_t mapping;
1236
1237         /* move empty skb from pool to prod and map it */
1238         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1239         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1240                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1241         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1242
1243         /* move partial skb from cons to pool (don't unmap yet) */
1244         fp->tpa_pool[queue] = *cons_rx_buf;
1245
1246         /* mark bin state as start - print error if current state != stop */
1247         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1248                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1249
1250         fp->tpa_state[queue] = BNX2X_TPA_START;
1251
1252         /* point prod_bd to new skb */
1253         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1254         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1255
1256 #ifdef BNX2X_STOP_ON_ERROR
1257         fp->tpa_queue_used |= (1 << queue);
1258 #ifdef __powerpc64__
1259         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1260 #else
1261         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1262 #endif
1263            fp->tpa_queue_used);
1264 #endif
1265 }
1266
1267 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1268                                struct sk_buff *skb,
1269                                struct eth_fast_path_rx_cqe *fp_cqe,
1270                                u16 cqe_idx)
1271 {
1272         struct sw_rx_page *rx_pg, old_rx_pg;
1273         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1274         u32 i, frag_len, frag_size, pages;
1275         int err;
1276         int j;
1277
1278         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1279         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1280
1281         /* This is needed in order to enable forwarding support */
1282         if (frag_size)
1283                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1284                                                max(frag_size, (u32)len_on_bd));
1285
1286 #ifdef BNX2X_STOP_ON_ERROR
1287         if (pages >
1288             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1289                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1290                           pages, cqe_idx);
1291                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1292                           fp_cqe->pkt_len, len_on_bd);
1293                 bnx2x_panic();
1294                 return -EINVAL;
1295         }
1296 #endif
1297
1298         /* Run through the SGL and compose the fragmented skb */
1299         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1300                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1301
1302                 /* FW gives the indices of the SGE as if the ring is an array
1303                    (meaning that "next" element will consume 2 indices) */
1304                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1305                 rx_pg = &fp->rx_page_ring[sge_idx];
1306                 old_rx_pg = *rx_pg;
1307
1308                 /* If we fail to allocate a substitute page, we simply stop
1309                    where we are and drop the whole packet */
1310                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1311                 if (unlikely(err)) {
1312                         fp->eth_q_stats.rx_skb_alloc_failed++;
1313                         return err;
1314                 }
1315
1316                 /* Unmap the page as we r going to pass it to the stack */
1317                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1318                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1319
1320                 /* Add one frag and update the appropriate fields in the skb */
1321                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1322
1323                 skb->data_len += frag_len;
1324                 skb->truesize += frag_len;
1325                 skb->len += frag_len;
1326
1327                 frag_size -= frag_len;
1328         }
1329
1330         return 0;
1331 }
1332
1333 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1334                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1335                            u16 cqe_idx)
1336 {
1337         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1338         struct sk_buff *skb = rx_buf->skb;
1339         /* alloc new skb */
1340         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1341
1342         /* Unmap skb in the pool anyway, as we are going to change
1343            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1344            fails. */
1345         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1346                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1347
1348         if (likely(new_skb)) {
1349                 /* fix ip xsum and give it to the stack */
1350                 /* (no need to map the new skb) */
1351 #ifdef BCM_VLAN
1352                 int is_vlan_cqe =
1353                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1354                          PARSING_FLAGS_VLAN);
1355                 int is_not_hwaccel_vlan_cqe =
1356                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1357 #endif
1358
1359                 prefetch(skb);
1360                 prefetch(((char *)(skb)) + 128);
1361
1362 #ifdef BNX2X_STOP_ON_ERROR
1363                 if (pad + len > bp->rx_buf_size) {
1364                         BNX2X_ERR("skb_put is about to fail...  "
1365                                   "pad %d  len %d  rx_buf_size %d\n",
1366                                   pad, len, bp->rx_buf_size);
1367                         bnx2x_panic();
1368                         return;
1369                 }
1370 #endif
1371
1372                 skb_reserve(skb, pad);
1373                 skb_put(skb, len);
1374
1375                 skb->protocol = eth_type_trans(skb, bp->dev);
1376                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1377
1378                 {
1379                         struct iphdr *iph;
1380
1381                         iph = (struct iphdr *)skb->data;
1382 #ifdef BCM_VLAN
1383                         /* If there is no Rx VLAN offloading -
1384                            take VLAN tag into an account */
1385                         if (unlikely(is_not_hwaccel_vlan_cqe))
1386                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1387 #endif
1388                         iph->check = 0;
1389                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1390                 }
1391
1392                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1393                                          &cqe->fast_path_cqe, cqe_idx)) {
1394 #ifdef BCM_VLAN
1395                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1396                             (!is_not_hwaccel_vlan_cqe))
1397                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1398                                                 le16_to_cpu(cqe->fast_path_cqe.
1399                                                             vlan_tag));
1400                         else
1401 #endif
1402                                 netif_receive_skb(skb);
1403                 } else {
1404                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1405                            " - dropping packet!\n");
1406                         dev_kfree_skb(skb);
1407                 }
1408
1409
1410                 /* put new skb in bin */
1411                 fp->tpa_pool[queue].skb = new_skb;
1412
1413         } else {
1414                 /* else drop the packet and keep the buffer in the bin */
1415                 DP(NETIF_MSG_RX_STATUS,
1416                    "Failed to allocate new skb - dropping packet!\n");
1417                 fp->eth_q_stats.rx_skb_alloc_failed++;
1418         }
1419
1420         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1421 }
1422
1423 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1424                                         struct bnx2x_fastpath *fp,
1425                                         u16 bd_prod, u16 rx_comp_prod,
1426                                         u16 rx_sge_prod)
1427 {
1428         struct ustorm_eth_rx_producers rx_prods = {0};
1429         int i;
1430
1431         /* Update producers */
1432         rx_prods.bd_prod = bd_prod;
1433         rx_prods.cqe_prod = rx_comp_prod;
1434         rx_prods.sge_prod = rx_sge_prod;
1435
1436         /*
1437          * Make sure that the BD and SGE data is updated before updating the
1438          * producers since FW might read the BD/SGE right after the producer
1439          * is updated.
1440          * This is only applicable for weak-ordered memory model archs such
1441          * as IA-64. The following barrier is also mandatory since the FW
1442          * assumes BDs must have buffers.
1443          */
1444         wmb();
1445
1446         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1447                 REG_WR(bp, BAR_USTRORM_INTMEM +
1448                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1449                        ((u32 *)&rx_prods)[i]);
1450
1451         mmiowb(); /* keep prod updates ordered */
1452
1453         DP(NETIF_MSG_RX_STATUS,
1454            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1455            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1456 }
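/*
 * Ordering sketch for bnx2x_update_rx_prod() (restating the code above,
 * no new assumptions): the three 16-bit producers are copied to ustorm
 * internal memory as 32-bit words, and correctness hinges on two points:
 *
 *   fill BDs/SGEs       normal stores
 *   wmb()               BD/SGE contents globally visible first
 *   REG_WR(producers)   FW may fetch BDs as soon as it sees these
 *   mmiowb()            keep the producer writes ordered
 */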
1457
1458 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1459 {
1460         struct bnx2x *bp = fp->bp;
1461         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1462         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1463         int rx_pkt = 0;
1464
1465 #ifdef BNX2X_STOP_ON_ERROR
1466         if (unlikely(bp->panic))
1467                 return 0;
1468 #endif
1469
1470         /* The CQ "next element" is the same size as a regular element,
1471            which is why it is safe to simply increment past it here */
1472         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1473         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1474                 hw_comp_cons++;
1475
1476         bd_cons = fp->rx_bd_cons;
1477         bd_prod = fp->rx_bd_prod;
1478         bd_prod_fw = bd_prod;
1479         sw_comp_cons = fp->rx_comp_cons;
1480         sw_comp_prod = fp->rx_comp_prod;
1481
1482         /* Memory barrier necessary as speculative reads of the rx
1483          * buffer can be ahead of the index in the status block
1484          */
1485         rmb();
1486
1487         DP(NETIF_MSG_RX_STATUS,
1488            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1489            fp->index, hw_comp_cons, sw_comp_cons);
1490
1491         while (sw_comp_cons != hw_comp_cons) {
1492                 struct sw_rx_bd *rx_buf = NULL;
1493                 struct sk_buff *skb;
1494                 union eth_rx_cqe *cqe;
1495                 u8 cqe_fp_flags;
1496                 u16 len, pad;
1497
1498                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1499                 bd_prod = RX_BD(bd_prod);
1500                 bd_cons = RX_BD(bd_cons);
1501
1502                 /* Prefetch the page containing the BD descriptor
1503                    at the producer's index. It will be needed when a new skb is
1504                    allocated */
1505                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1506                                              (&fp->rx_desc_ring[bd_prod])) -
1507                                   PAGE_SIZE + 1));
1508
1509                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1510                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1511
1512                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1513                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1514                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1515                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1516                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1517                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1518
1519                 /* is this a slowpath msg? */
1520                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1521                         bnx2x_sp_event(fp, cqe);
1522                         goto next_cqe;
1523
1524                 /* this is an rx packet */
1525                 } else {
1526                         rx_buf = &fp->rx_buf_ring[bd_cons];
1527                         skb = rx_buf->skb;
1528                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1529                         pad = cqe->fast_path_cqe.placement_offset;
1530
1531                         /* If the CQE is marked as both TPA_START and
1532                            TPA_END, it is a non-TPA CQE */
1533                         if ((!fp->disable_tpa) &&
1534                             (TPA_TYPE(cqe_fp_flags) !=
1535                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1536                                 u16 queue = cqe->fast_path_cqe.queue_index;
1537
1538                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1539                                         DP(NETIF_MSG_RX_STATUS,
1540                                            "calling tpa_start on queue %d\n",
1541                                            queue);
1542
1543                                         bnx2x_tpa_start(fp, queue, skb,
1544                                                         bd_cons, bd_prod);
1545                                         goto next_rx;
1546                                 }
1547
1548                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1549                                         DP(NETIF_MSG_RX_STATUS,
1550                                            "calling tpa_stop on queue %d\n",
1551                                            queue);
1552
1553                                         if (!BNX2X_RX_SUM_FIX(cqe))
1554                                                 BNX2X_ERR("STOP on non-TCP "
1555                                                           "data\n");
1556
1557                                         /* This is the size of the linear data
1558                                            on this skb */
1559                                         len = le16_to_cpu(cqe->fast_path_cqe.
1560                                                                 len_on_bd);
1561                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1562                                                     len, cqe, comp_ring_cons);
1563 #ifdef BNX2X_STOP_ON_ERROR
1564                                         if (bp->panic)
1565                                                 return 0;
1566 #endif
1567
1568                                         bnx2x_update_sge_prod(fp,
1569                                                         &cqe->fast_path_cqe);
1570                                         goto next_cqe;
1571                                 }
1572                         }
1573
1574                         pci_dma_sync_single_for_device(bp->pdev,
1575                                         pci_unmap_addr(rx_buf, mapping),
1576                                                        pad + RX_COPY_THRESH,
1577                                                        PCI_DMA_FROMDEVICE);
1578                         prefetch(skb);
1579                         prefetch(((char *)(skb)) + 128);
1580
1581                         /* is this an error packet? */
1582                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1583                                 DP(NETIF_MSG_RX_ERR,
1584                                    "ERROR  flags %x  rx packet %u\n",
1585                                    cqe_fp_flags, sw_comp_cons);
1586                                 fp->eth_q_stats.rx_err_discard_pkt++;
1587                                 goto reuse_rx;
1588                         }
1589
1590                         /* Since we don't have a jumbo ring,
1591                          * copy small packets if the MTU > 1500
1592                          */
1593                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1594                             (len <= RX_COPY_THRESH)) {
1595                                 struct sk_buff *new_skb;
1596
1597                                 new_skb = netdev_alloc_skb(bp->dev,
1598                                                            len + pad);
1599                                 if (new_skb == NULL) {
1600                                         DP(NETIF_MSG_RX_ERR,
1601                                            "ERROR  packet dropped "
1602                                            "because of alloc failure\n");
1603                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1604                                         goto reuse_rx;
1605                                 }
1606
1607                                 /* aligned copy */
1608                                 skb_copy_from_linear_data_offset(skb, pad,
1609                                                     new_skb->data + pad, len);
1610                                 skb_reserve(new_skb, pad);
1611                                 skb_put(new_skb, len);
1612
1613                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1614
1615                                 skb = new_skb;
1616
1617                         } else
1618                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1619                                 pci_unmap_single(bp->pdev,
1620                                         pci_unmap_addr(rx_buf, mapping),
1621                                                  bp->rx_buf_size,
1622                                                  PCI_DMA_FROMDEVICE);
1623                                 skb_reserve(skb, pad);
1624                                 skb_put(skb, len);
1625
1626                         } else {
1627                                 DP(NETIF_MSG_RX_ERR,
1628                                    "ERROR  packet dropped because "
1629                                    "of alloc failure\n");
1630                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1631 reuse_rx:
1632                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1633                                 goto next_rx;
1634                         }
1635
1636                         skb->protocol = eth_type_trans(skb, bp->dev);
1637
1638                         skb->ip_summed = CHECKSUM_NONE;
1639                         if (bp->rx_csum) {
1640                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1641                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1642                                 else
1643                                         fp->eth_q_stats.hw_csum_err++;
1644                         }
1645                 }
1646
1647                 skb_record_rx_queue(skb, fp->index);
1648 #ifdef BCM_VLAN
1649                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1650                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1651                      PARSING_FLAGS_VLAN))
1652                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1653                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1654                 else
1655 #endif
1656                         netif_receive_skb(skb);
1657
1658
1659 next_rx:
1660                 rx_buf->skb = NULL;
1661
1662                 bd_cons = NEXT_RX_IDX(bd_cons);
1663                 bd_prod = NEXT_RX_IDX(bd_prod);
1664                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1665                 rx_pkt++;
1666 next_cqe:
1667                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1668                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1669
1670                 if (rx_pkt == budget)
1671                         break;
1672         } /* while */
1673
1674         fp->rx_bd_cons = bd_cons;
1675         fp->rx_bd_prod = bd_prod_fw;
1676         fp->rx_comp_cons = sw_comp_cons;
1677         fp->rx_comp_prod = sw_comp_prod;
1678
1679         /* Update producers */
1680         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1681                              fp->rx_sge_prod);
1682
1683         fp->rx_pkt += rx_pkt;
1684         fp->rx_calls++;
1685
1686         return rx_pkt;
1687 }
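/*
 * Usage sketch (hypothetical, for illustration only -- the driver's real
 * NAPI poll handler elsewhere in this file follows the same shape): a poll
 * routine calls bnx2x_rx_int() with the NAPI budget and completes when
 * less than a full budget of work was done:
 *
 *   work = bnx2x_rx_int(fp, budget);
 *   if (work < budget) {
 *           napi_complete(napi);
 *           ... re-enable this queue's interrupt ...
 *   }
 */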
1688
1689 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1690 {
1691         struct bnx2x_fastpath *fp = fp_cookie;
1692         struct bnx2x *bp = fp->bp;
1693
1694         /* Return here if interrupt is disabled */
1695         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1696                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1697                 return IRQ_HANDLED;
1698         }
1699
1700         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1701            fp->index, fp->sb_id);
1702         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1703
1704 #ifdef BNX2X_STOP_ON_ERROR
1705         if (unlikely(bp->panic))
1706                 return IRQ_HANDLED;
1707 #endif
1708         /* Handle Rx or Tx according to MSI-X vector */
1709         if (fp->is_rx_queue) {
1710                 prefetch(fp->rx_cons_sb);
1711                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1712
1713                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1714
1715         } else {
1716                 prefetch(fp->tx_cons_sb);
1717                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1718
1719                 bnx2x_update_fpsb_idx(fp);
1720                 rmb();
1721                 bnx2x_tx_int(fp);
1722
1723                 /* Re-enable interrupts */
1724                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1725                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1726                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1727                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1728         }
1729
1730         return IRQ_HANDLED;
1731 }
1732
1733 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1734 {
1735         struct bnx2x *bp = netdev_priv(dev_instance);
1736         u16 status = bnx2x_ack_int(bp);
1737         u16 mask;
1738         int i;
1739
1740         /* Return here if interrupt is shared and it's not for us */
1741         if (unlikely(status == 0)) {
1742                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1743                 return IRQ_NONE;
1744         }
1745         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1746
1747         /* Return here if interrupt is disabled */
1748         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1749                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1750                 return IRQ_HANDLED;
1751         }
1752
1753 #ifdef BNX2X_STOP_ON_ERROR
1754         if (unlikely(bp->panic))
1755                 return IRQ_HANDLED;
1756 #endif
1757
1758         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1759                 struct bnx2x_fastpath *fp = &bp->fp[i];
1760
1761                 mask = 0x2 << fp->sb_id;
1762                 if (status & mask) {
1763                         /* Handle Rx or Tx according to SB id */
1764                         if (fp->is_rx_queue) {
1765                                 prefetch(fp->rx_cons_sb);
1766                                 prefetch(&fp->status_blk->u_status_block.
1767                                                         status_block_index);
1768
1769                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1770
1771                         } else {
1772                                 prefetch(fp->tx_cons_sb);
1773                                 prefetch(&fp->status_blk->c_status_block.
1774                                                         status_block_index);
1775
1776                                 bnx2x_update_fpsb_idx(fp);
1777                                 rmb();
1778                                 bnx2x_tx_int(fp);
1779
1780                                 /* Re-enable interrupts */
1781                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1782                                              le16_to_cpu(fp->fp_u_idx),
1783                                              IGU_INT_NOP, 1);
1784                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1785                                              le16_to_cpu(fp->fp_c_idx),
1786                                              IGU_INT_ENABLE, 1);
1787                         }
1788                         status &= ~mask;
1789                 }
1790         }
1791
1792
1793         if (unlikely(status & 0x1)) {
1794                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1795
1796                 status &= ~0x1;
1797                 if (!status)
1798                         return IRQ_HANDLED;
1799         }
1800
1801         if (status)
1802                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1803                    status);
1804
1805         return IRQ_HANDLED;
1806 }
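/*
 * Note on the INTA status word handled above (derived from the code, not
 * from separate documentation): bit 0 reports the default (slow path)
 * status block, and each fastpath status block sb_id is reported at bit
 * (sb_id + 1) -- hence the "0x2 << fp->sb_id" mask and the final check of
 * "status & 0x1" for slow path work.
 */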
1807
1808 /* end of fast path */
1809
1810 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1811
1812 /* Link */
1813
1814 /*
1815  * General service functions
1816  */
1817
1818 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1819 {
1820         u32 lock_status;
1821         u32 resource_bit = (1 << resource);
1822         int func = BP_FUNC(bp);
1823         u32 hw_lock_control_reg;
1824         int cnt;
1825
1826         /* Validating that the resource is within range */
1827         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1828                 DP(NETIF_MSG_HW,
1829                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1830                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1831                 return -EINVAL;
1832         }
1833
1834         if (func <= 5) {
1835                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1836         } else {
1837                 hw_lock_control_reg =
1838                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1839         }
1840
1841         /* Validating that the resource is not already taken */
1842         lock_status = REG_RD(bp, hw_lock_control_reg);
1843         if (lock_status & resource_bit) {
1844                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1845                    lock_status, resource_bit);
1846                 return -EEXIST;
1847         }
1848
1849         /* Try for 5 seconds, polling every 5ms */
1850         for (cnt = 0; cnt < 1000; cnt++) {
1851                 /* Try to acquire the lock */
1852                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1853                 lock_status = REG_RD(bp, hw_lock_control_reg);
1854                 if (lock_status & resource_bit)
1855                         return 0;
1856
1857                 msleep(5);
1858         }
1859         DP(NETIF_MSG_HW, "Timeout\n");
1860         return -EAGAIN;
1861 }
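/*
 * The hardware lock protocol implemented above, as read from the code:
 * writing resource_bit to hw_lock_control_reg + 4 requests the lock, and
 * a read-back of hw_lock_control_reg with the bit set means the lock was
 * granted.  1000 attempts at 5 ms intervals give the 5 second timeout.
 */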
1862
1863 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1864 {
1865         u32 lock_status;
1866         u32 resource_bit = (1 << resource);
1867         int func = BP_FUNC(bp);
1868         u32 hw_lock_control_reg;
1869
1870         /* Validating that the resource is within range */
1871         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1872                 DP(NETIF_MSG_HW,
1873                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1875                 return -EINVAL;
1876         }
1877
1878         if (func <= 5) {
1879                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1880         } else {
1881                 hw_lock_control_reg =
1882                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1883         }
1884
1885         /* Validating that the resource is currently taken */
1886         lock_status = REG_RD(bp, hw_lock_control_reg);
1887         if (!(lock_status & resource_bit)) {
1888                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1889                    lock_status, resource_bit);
1890                 return -EFAULT;
1891         }
1892
1893         REG_WR(bp, hw_lock_control_reg, resource_bit);
1894         return 0;
1895 }
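/*
 * Typical acquire/release pairing (see bnx2x_set_gpio() below for a real
 * caller):
 *
 *   bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *   ... read-modify-write MISC_REG_GPIO ...
 *   bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */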
1896
1897 /* HW Lock for shared dual port PHYs */
1898 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1899 {
1900         mutex_lock(&bp->port.phy_mutex);
1901
1902         if (bp->port.need_hw_lock)
1903                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1904 }
1905
1906 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1907 {
1908         if (bp->port.need_hw_lock)
1909                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1910
1911         mutex_unlock(&bp->port.phy_mutex);
1912 }
1913
1914 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1915 {
1916         /* The GPIO should be swapped if swap register is set and active */
1917         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1918                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1919         int gpio_shift = gpio_num +
1920                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1921         u32 gpio_mask = (1 << gpio_shift);
1922         u32 gpio_reg;
1923         int value;
1924
1925         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1926                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1927                 return -EINVAL;
1928         }
1929
1930         /* read GPIO value */
1931         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1932
1933         /* get the requested pin value */
1934         if ((gpio_reg & gpio_mask) == gpio_mask)
1935                 value = 1;
1936         else
1937                 value = 0;
1938
1939         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1940
1941         return value;
1942 }
1943
1944 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1945 {
1946         /* The GPIO should be swapped if swap register is set and active */
1947         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1948                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1949         int gpio_shift = gpio_num +
1950                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1951         u32 gpio_mask = (1 << gpio_shift);
1952         u32 gpio_reg;
1953
1954         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1955                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1956                 return -EINVAL;
1957         }
1958
1959         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1960         /* read GPIO and mask out everything except the float bits */
1961         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1962
1963         switch (mode) {
1964         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1965                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1966                    gpio_num, gpio_shift);
1967                 /* clear FLOAT and set CLR */
1968                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1969                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1970                 break;
1971
1972         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1973                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1974                    gpio_num, gpio_shift);
1975                 /* clear FLOAT and set SET */
1976                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1977                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1978                 break;
1979
1980         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1981                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1982                    gpio_num, gpio_shift);
1983                 /* set FLOAT */
1984                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1985                 break;
1986
1987         default:
1988                 break;
1989         }
1990
1991         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1992         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1993
1994         return 0;
1995 }
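/*
 * Example call, taken from the fan failure handling later in this file:
 * force the PHY into low power mode by driving GPIO 2 low on the port:
 *
 *   bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 *                  MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */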
1996
1997 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1998 {
1999         /* The GPIO should be swapped if swap register is set and active */
2000         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002         int gpio_shift = gpio_num +
2003                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004         u32 gpio_mask = (1 << gpio_shift);
2005         u32 gpio_reg;
2006
2007         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2008                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2009                 return -EINVAL;
2010         }
2011
2012         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2013         /* read GPIO int */
2014         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2015
2016         switch (mode) {
2017         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2018                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2019                                    "output low\n", gpio_num, gpio_shift);
2020                 /* clear SET and set CLR */
2021                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2023                 break;
2024
2025         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2026                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2027                                    "output high\n", gpio_num, gpio_shift);
2028                 /* clear CLR and set SET */
2029                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2030                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2031                 break;
2032
2033         default:
2034                 break;
2035         }
2036
2037         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2038         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2039
2040         return 0;
2041 }
2042
2043 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2044 {
2045         u32 spio_mask = (1 << spio_num);
2046         u32 spio_reg;
2047
2048         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2049             (spio_num > MISC_REGISTERS_SPIO_7)) {
2050                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2051                 return -EINVAL;
2052         }
2053
2054         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2055         /* read SPIO and mask out everything except the float bits */
2056         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2057
2058         switch (mode) {
2059         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2060                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2061                 /* clear FLOAT and set CLR */
2062                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2063                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2064                 break;
2065
2066         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2067                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2068                 /* clear FLOAT and set SET */
2069                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2070                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2071                 break;
2072
2073         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2074                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2075                 /* set FLOAT */
2076                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2077                 break;
2078
2079         default:
2080                 break;
2081         }
2082
2083         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2084         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2085
2086         return 0;
2087 }
2088
2089 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2090 {
2091         switch (bp->link_vars.ieee_fc &
2092                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2093         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2094                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2095                                           ADVERTISED_Pause);
2096                 break;
2097
2098         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2099                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2100                                          ADVERTISED_Pause);
2101                 break;
2102
2103         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2104                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2105                 break;
2106
2107         default:
2108                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2109                                           ADVERTISED_Pause);
2110                 break;
2111         }
2112 }
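/*
 * Summary of the mapping above (IEEE pause autoneg advertisement to
 * ethtool advertising bits):
 *
 *   PAUSE_NONE       -> clear ADVERTISED_Pause and ADVERTISED_Asym_Pause
 *   PAUSE_BOTH       -> set   ADVERTISED_Pause and ADVERTISED_Asym_Pause
 *   PAUSE_ASYMMETRIC -> set   ADVERTISED_Asym_Pause
 *   anything else    -> clear both, as for PAUSE_NONE
 */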
2113
2114 static void bnx2x_link_report(struct bnx2x *bp)
2115 {
2116         if (bp->state == BNX2X_STATE_DISABLED) {
2117                 netif_carrier_off(bp->dev);
2118                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2119                 return;
2120         }
2121
2122         if (bp->link_vars.link_up) {
2123                 if (bp->state == BNX2X_STATE_OPEN)
2124                         netif_carrier_on(bp->dev);
2125                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2126
2127                 printk("%d Mbps ", bp->link_vars.line_speed);
2128
2129                 if (bp->link_vars.duplex == DUPLEX_FULL)
2130                         printk("full duplex");
2131                 else
2132                         printk("half duplex");
2133
2134                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2135                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2136                                 printk(", receive ");
2137                                 if (bp->link_vars.flow_ctrl &
2138                                     BNX2X_FLOW_CTRL_TX)
2139                                         printk("& transmit ");
2140                         } else {
2141                                 printk(", transmit ");
2142                         }
2143                         printk("flow control ON");
2144                 }
2145                 printk("\n");
2146
2147         } else { /* link_down */
2148                 netif_carrier_off(bp->dev);
2149                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2150         }
2151 }
2152
2153 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2154 {
2155         if (!BP_NOMCP(bp)) {
2156                 u8 rc;
2157
2158                 /* Initialize link parameters structure variables */
2159                 /* It is recommended to turn off RX FC for jumbo frames
2160                    for better performance */
2161                 if (bp->dev->mtu > 5000)
2162                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2163                 else
2164                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2165
2166                 bnx2x_acquire_phy_lock(bp);
2167
2168                 if (load_mode == LOAD_DIAG)
2169                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2170
2171                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2172
2173                 bnx2x_release_phy_lock(bp);
2174
2175                 bnx2x_calc_fc_adv(bp);
2176
2177                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2178                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2179                         bnx2x_link_report(bp);
2180                 }
2181
2182                 return rc;
2183         }
2184         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2185         return -EINVAL;
2186 }
2187
2188 static void bnx2x_link_set(struct bnx2x *bp)
2189 {
2190         if (!BP_NOMCP(bp)) {
2191                 bnx2x_acquire_phy_lock(bp);
2192                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2193                 bnx2x_release_phy_lock(bp);
2194
2195                 bnx2x_calc_fc_adv(bp);
2196         } else
2197                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2198 }
2199
2200 static void bnx2x__link_reset(struct bnx2x *bp)
2201 {
2202         if (!BP_NOMCP(bp)) {
2203                 bnx2x_acquire_phy_lock(bp);
2204                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2205                 bnx2x_release_phy_lock(bp);
2206         } else
2207                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2208 }
2209
2210 static u8 bnx2x_link_test(struct bnx2x *bp)
2211 {
2212         u8 rc;
2213
2214         bnx2x_acquire_phy_lock(bp);
2215         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2216         bnx2x_release_phy_lock(bp);
2217
2218         return rc;
2219 }
2220
2221 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2222 {
2223         u32 r_param = bp->link_vars.line_speed / 8;
2224         u32 fair_periodic_timeout_usec;
2225         u32 t_fair;
2226
2227         memset(&(bp->cmng.rs_vars), 0,
2228                sizeof(struct rate_shaping_vars_per_port));
2229         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2230
2231         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2232         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2233
2234         /* this is the threshold below which no timer arming will occur;
2235            the 1.25 coefficient makes the threshold a little bigger than
2236            the real time, to compensate for timer inaccuracy */
2237         bp->cmng.rs_vars.rs_threshold =
2238                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2239
2240         /* resolution of fairness timer */
2241         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2242         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2243         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2244
2245         /* this is the threshold below which we won't arm the timer anymore */
2246         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2247
2248         /* we multiply by 1e3/8 to get bytes/msec.
2249            We don't want the accumulated credit to exceed
2250            t_fair*FAIR_MEM worth of bytes (the algorithm resolution) */
2251         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2252         /* since each tick is 4 usec */
2253         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2254 }
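/*
 * Worked example, assuming RS_PERIODIC_TIMEOUT_USEC is the 100 usec value
 * the "100 usec in SDM ticks" comment above implies: at line_speed =
 * 10000 Mbps, r_param = 10000/8 = 1250 bytes/usec, t_fair = 1000 usec
 * (matching the "for 10G it is 1000 usec" comment), and
 * rs_threshold = 100 * 1250 * 5/4 = 156250 bytes.
 */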
2255
2256 /* Calculates the sum of vn_min_rates.
2257    It's needed for further normalizing of the min_rates.
2258    Stores the result in bp->vn_weight_sum:
2259      the sum of vn_min_rates,
2260        or
2261      0 - if all the min_rates are 0.
2262      In the latter case the fairness algorithm should be deactivated.
2263      If not all min_rates are zero, those that are zero will be set to 1.
2264  */
2265 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2266 {
2267         int all_zero = 1;
2268         int port = BP_PORT(bp);
2269         int vn;
2270
2271         bp->vn_weight_sum = 0;
2272         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2273                 int func = 2*vn + port;
2274                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2275                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2276                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2277
2278                 /* Skip hidden vns */
2279                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2280                         continue;
2281
2282                 /* If min rate is zero - set it to the default (DEF_MIN_RATE) */
2283                 if (!vn_min_rate)
2284                         vn_min_rate = DEF_MIN_RATE;
2285                 else
2286                         all_zero = 0;
2287
2288                 bp->vn_weight_sum += vn_min_rate;
2289         }
2290
2291         /* ... only if all min rates are zeros - disable fairness */
2292         if (all_zero)
2293                 bp->vn_weight_sum = 0;
2294 }
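/*
 * Example with hypothetical mf_cfg values: min BW fields of 0, 25, 0 and
 * 50 across the four vns contribute DEF_MIN_RATE + 2500 + DEF_MIN_RATE +
 * 5000 (the field is scaled by 100 above), so vn_weight_sum is non-zero
 * and fairness stays enabled; only if all four fields were 0 would
 * vn_weight_sum be forced back to 0.
 */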
2295
2296 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2297 {
2298         struct rate_shaping_vars_per_vn m_rs_vn;
2299         struct fairness_vars_per_vn m_fair_vn;
2300         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2301         u16 vn_min_rate, vn_max_rate;
2302         int i;
2303
2304         /* If function is hidden - set min and max to zeroes */
2305         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2306                 vn_min_rate = 0;
2307                 vn_max_rate = 0;
2308
2309         } else {
2310                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2311                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2312                 /* If fairness is enabled (not all min rates are zero) and
2313                    the current min rate is zero - set it to the default.
2314                    This is a requirement of the algorithm. */
2315                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2316                         vn_min_rate = DEF_MIN_RATE;
2317                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2318                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2319         }
2320
2321         DP(NETIF_MSG_IFUP,
2322            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2323            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2324
2325         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2326         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2327
2328         /* global vn counter - maximal Mbps for this vn */
2329         m_rs_vn.vn_counter.rate = vn_max_rate;
2330
2331         /* quota - number of bytes transmitted in this period */
2332         m_rs_vn.vn_counter.quota =
2333                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2334
2335         if (bp->vn_weight_sum) {
2336                 /* credit for each period of the fairness algorithm:
2337                    number of bytes in T_FAIR (the vns share the port rate).
2338                    vn_weight_sum should not be larger than 10000, thus
2339                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2340                    than zero */
2341                 m_fair_vn.vn_credit_delta =
2342                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2343                                                  (8 * bp->vn_weight_sum))),
2344                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2345                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2346                    m_fair_vn.vn_credit_delta);
2347         }
2348
2349         /* Store it to internal memory */
2350         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2351                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2352                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2353                        ((u32 *)(&m_rs_vn))[i]);
2354
2355         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2356                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2357                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2358                        ((u32 *)(&m_fair_vn))[i]);
2359 }
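/*
 * Quota arithmetic, derived from the code above: vn_max_rate is in Mbps,
 * i.e. bits per usec, so a vn capped at 10000 Mbps with the 100 usec rate
 * shaping period gets
 *
 *   quota = 10000 * 100 / 8 = 125000 bytes per period
 *
 * which sustains exactly 10 Gbps.
 */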
2360
2361
2362 /* This function is called upon link interrupt */
2363 static void bnx2x_link_attn(struct bnx2x *bp)
2364 {
2365         /* Make sure that we are synced with the current statistics */
2366         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2367
2368         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2369
2370         if (bp->link_vars.link_up) {
2371
2372                 /* dropless flow control */
2373                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2374                         int port = BP_PORT(bp);
2375                         u32 pause_enabled = 0;
2376
2377                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2378                                 pause_enabled = 1;
2379
2380                         REG_WR(bp, BAR_USTRORM_INTMEM +
2381                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2382                                pause_enabled);
2383                 }
2384
2385                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2386                         struct host_port_stats *pstats;
2387
2388                         pstats = bnx2x_sp(bp, port_stats);
2389                         /* reset old bmac stats */
2390                         memset(&(pstats->mac_stx[0]), 0,
2391                                sizeof(struct mac_stx));
2392                 }
2393                 if ((bp->state == BNX2X_STATE_OPEN) ||
2394                     (bp->state == BNX2X_STATE_DISABLED))
2395                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2396         }
2397
2398         /* indicate link status */
2399         bnx2x_link_report(bp);
2400
2401         if (IS_E1HMF(bp)) {
2402                 int port = BP_PORT(bp);
2403                 int func;
2404                 int vn;
2405
2406                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2407                         if (vn == BP_E1HVN(bp))
2408                                 continue;
2409
2410                         func = ((vn << 1) | port);
2411
2412                         /* Set the attention towards other drivers
2413                            on the same port */
2414                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2415                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2416                 }
2417
2418                 if (bp->link_vars.link_up) {
2419                         int i;
2420
2421                         /* Init rate shaping and fairness contexts */
2422                         bnx2x_init_port_minmax(bp);
2423
2424                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2425                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2426
2427                         /* Store it to internal memory */
2428                         for (i = 0;
2429                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2430                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2431                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2432                                        ((u32 *)(&bp->cmng))[i]);
2433                 }
2434         }
2435 }
2436
2437 static void bnx2x__link_status_update(struct bnx2x *bp)
2438 {
2439         int func = BP_FUNC(bp);
2440
2441         if (bp->state != BNX2X_STATE_OPEN)
2442                 return;
2443
2444         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2445
2446         if (bp->link_vars.link_up)
2447                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2448         else
2449                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2450
2451         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2452         bnx2x_calc_vn_weight_sum(bp);
2453
2454         /* indicate link status */
2455         bnx2x_link_report(bp);
2456 }
2457
2458 static void bnx2x_pmf_update(struct bnx2x *bp)
2459 {
2460         int port = BP_PORT(bp);
2461         u32 val;
2462
2463         bp->port.pmf = 1;
2464         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2465
2466         /* enable nig attention */
2467         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2468         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2469         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2470
2471         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2472 }
2473
2474 /* end of Link */
2475
2476 /* slow path */
2477
2478 /*
2479  * General service functions
2480  */
2481
2482 /* send the MCP a request, block until there is a reply */
2483 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2484 {
2485         int func = BP_FUNC(bp);
2486         u32 seq = ++bp->fw_seq;
2487         u32 rc = 0;
2488         u32 cnt = 1;
2489         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2490
2491         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2492         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2493
2494         do {
2495                 /* let the FW do its magic ... */
2496                 msleep(delay);
2497
2498                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2499
2500                 /* Give the FW up to 2 seconds (200*10ms) */
2501         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2502
2503         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2504            cnt*delay, rc, seq);
2505
2506         /* is this a reply to our command? */
2507         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2508                 rc &= FW_MSG_CODE_MASK;
2509         else {
2510                 /* FW BUG! */
2511                 BNX2X_ERR("FW failed to respond!\n");
2512                 bnx2x_fw_dump(bp);
2513                 rc = 0;
2514         }
2515
2516         return rc;
2517 }
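/*
 * Callers hand in a DRV_MSG_CODE_* opcode and get back the masked
 * FW_MSG_CODE_* reply, or 0 if the FW never answered.  See for example
 * the DCC handling below:
 *
 *   bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 */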
2518
2519 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2520 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2521 static void bnx2x_set_rx_mode(struct net_device *dev);
2522
2523 static void bnx2x_e1h_disable(struct bnx2x *bp)
2524 {
2525         int port = BP_PORT(bp);
2526         int i;
2527
2528         bp->rx_mode = BNX2X_RX_MODE_NONE;
2529         bnx2x_set_storm_rx_mode(bp);
2530
2531         netif_tx_disable(bp->dev);
2532         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2533
2534         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2535
2536         bnx2x_set_mac_addr_e1h(bp, 0);
2537
2538         for (i = 0; i < MC_HASH_SIZE; i++)
2539                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2540
2541         netif_carrier_off(bp->dev);
2542 }
2543
2544 static void bnx2x_e1h_enable(struct bnx2x *bp)
2545 {
2546         int port = BP_PORT(bp);
2547
2548         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2549
2550         bnx2x_set_mac_addr_e1h(bp, 1);
2551
2552         /* Only the Tx queues need to be re-enabled */
2553         netif_tx_wake_all_queues(bp->dev);
2554
2555         /* Initialize the receive filter. */
2556         bnx2x_set_rx_mode(bp->dev);
2557 }
2558
2559 static void bnx2x_update_min_max(struct bnx2x *bp)
2560 {
2561         int port = BP_PORT(bp);
2562         int vn, i;
2563
2564         /* Init rate shaping and fairness contexts */
2565         bnx2x_init_port_minmax(bp);
2566
2567         bnx2x_calc_vn_weight_sum(bp);
2568
2569         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2570                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2571
2572         if (bp->port.pmf) {
2573                 int func;
2574
2575                 /* Set the attention towards other drivers on the same port */
2576                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2577                         if (vn == BP_E1HVN(bp))
2578                                 continue;
2579
2580                         func = ((vn << 1) | port);
2581                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2582                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2583                 }
2584
2585                 /* Store it to internal memory */
2586                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2587                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2588                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2589                                ((u32 *)(&bp->cmng))[i]);
2590         }
2591 }
2592
2593 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2594 {
2595         int func = BP_FUNC(bp);
2596
2597         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2598         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2599
2600         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2601
2602                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2603                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2604                         bp->state = BNX2X_STATE_DISABLED;
2605
2606                         bnx2x_e1h_disable(bp);
2607                 } else {
2608                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2609                         bp->state = BNX2X_STATE_OPEN;
2610
2611                         bnx2x_e1h_enable(bp);
2612                 }
2613                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2614         }
2615         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2616
2617                 bnx2x_update_min_max(bp);
2618                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2619         }
2620
2621         /* Report results to MCP */
2622         if (dcc_event)
2623                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2624         else
2625                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2626 }
2627
2628 /* the slow path queue is odd since completions arrive on the fastpath ring */
2629 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2630                          u32 data_hi, u32 data_lo, int common)
2631 {
2632         int func = BP_FUNC(bp);
2633
2634         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2635            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2636            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2637            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2638            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2639
2640 #ifdef BNX2X_STOP_ON_ERROR
2641         if (unlikely(bp->panic))
2642                 return -EIO;
2643 #endif
2644
2645         spin_lock_bh(&bp->spq_lock);
2646
2647         if (!bp->spq_left) {
2648                 BNX2X_ERR("BUG! SPQ ring full!\n");
2649                 spin_unlock_bh(&bp->spq_lock);
2650                 bnx2x_panic();
2651                 return -EBUSY;
2652         }
2653
2654         /* The CID needs the port number to be encoded in it */
2655         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2656                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2657                                      HW_CID(bp, cid)));
2658         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2659         if (common)
2660                 bp->spq_prod_bd->hdr.type |=
2661                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2662
2663         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2664         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2665
2666         bp->spq_left--;
2667
2668         if (bp->spq_prod_bd == bp->spq_last_bd) {
2669                 bp->spq_prod_bd = bp->spq;
2670                 bp->spq_prod_idx = 0;
2671                 DP(NETIF_MSG_TIMER, "end of spq\n");
2672
2673         } else {
2674                 bp->spq_prod_bd++;
2675                 bp->spq_prod_idx++;
2676         }
2677
2678         /* Make sure that BD data is updated before writing the producer */
2679         wmb();
2680
2681         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2682                bp->spq_prod_idx);
2683
2684         mmiowb();
2685
2686         spin_unlock_bh(&bp->spq_lock);
2687         return 0;
2688 }
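/*
 * Note: the SPQ is a single-producer ring.  When the producer BD reaches
 * spq_last_bd it wraps back to bp->spq and spq_prod_idx restarts at 0;
 * the wmb() above guarantees the BD contents are visible before the new
 * producer index is written out to XSTORM internal memory.
 */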
2689
2690 /* acquire split MCP access lock register */
2691 static int bnx2x_acquire_alr(struct bnx2x *bp)
2692 {
2693         u32 i, j, val;
2694         int rc = 0;
2695
2696         might_sleep();
2697         i = 100;
2698         for (j = 0; j < i*10; j++) {
2699                 val = (1UL << 31);
2700                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2701                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2702                 if (val & (1UL << 31))
2703                         break;
2704
2705                 msleep(5);
2706         }
2707         if (!(val & (1UL << 31))) {
2708                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2709                 rc = -EBUSY;
2710         }
2711
2712         return rc;
2713 }
2714
2715 /* release split MCP access lock register */
2716 static void bnx2x_release_alr(struct bnx2x *bp)
2717 {
2718         u32 val = 0;
2719
2720         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2721 }
2722
2723 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2724 {
2725         struct host_def_status_block *def_sb = bp->def_status_blk;
2726         u16 rc = 0;
2727
2728         barrier(); /* status block is written to by the chip */
2729         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2730                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2731                 rc |= 1;
2732         }
2733         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2734                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2735                 rc |= 2;
2736         }
2737         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2738                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2739                 rc |= 4;
2740         }
2741         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2742                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2743                 rc |= 8;
2744         }
2745         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2746                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2747                 rc |= 16;
2748         }
2749         return rc;
2750 }
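/*
 * The return value is a bitmask of which default status block indices
 * changed: bit 0 - attention bits, bit 1 - cstorm, bit 2 - ustorm,
 * bit 3 - xstorm, bit 4 - tstorm (straight from the rc |= 1/2/4/8/16
 * updates above).
 */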
2751
2752 /*
2753  * slow path service functions
2754  */
2755
2756 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2757 {
2758         int port = BP_PORT(bp);
2759         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2760                        COMMAND_REG_ATTN_BITS_SET);
2761         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2762                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2763         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2764                                        NIG_REG_MASK_INTERRUPT_PORT0;
2765         u32 aeu_mask;
2766         u32 nig_mask = 0;
2767
2768         if (bp->attn_state & asserted)
2769                 BNX2X_ERR("IGU ERROR\n");
2770
2771         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2772         aeu_mask = REG_RD(bp, aeu_addr);
2773
2774         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2775            aeu_mask, asserted);
2776         aeu_mask &= ~(asserted & 0xff);
2777         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2778
2779         REG_WR(bp, aeu_addr, aeu_mask);
2780         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2781
2782         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2783         bp->attn_state |= asserted;
2784         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2785
2786         if (asserted & ATTN_HARD_WIRED_MASK) {
2787                 if (asserted & ATTN_NIG_FOR_FUNC) {
2788
2789                         bnx2x_acquire_phy_lock(bp);
2790
2791                         /* save nig interrupt mask */
2792                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2793                         REG_WR(bp, nig_int_mask_addr, 0);
2794
2795                         bnx2x_link_attn(bp);
2796
2797                         /* handle unicore attn? */
2798                 }
2799                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2800                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2801
2802                 if (asserted & GPIO_2_FUNC)
2803                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2804
2805                 if (asserted & GPIO_3_FUNC)
2806                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2807
2808                 if (asserted & GPIO_4_FUNC)
2809                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2810
2811                 if (port == 0) {
2812                         if (asserted & ATTN_GENERAL_ATTN_1) {
2813                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2814                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2815                         }
2816                         if (asserted & ATTN_GENERAL_ATTN_2) {
2817                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2818                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2819                         }
2820                         if (asserted & ATTN_GENERAL_ATTN_3) {
2821                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2822                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2823                         }
2824                 } else {
2825                         if (asserted & ATTN_GENERAL_ATTN_4) {
2826                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2827                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2828                         }
2829                         if (asserted & ATTN_GENERAL_ATTN_5) {
2830                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2831                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2832                         }
2833                         if (asserted & ATTN_GENERAL_ATTN_6) {
2834                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2835                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2836                         }
2837                 }
2838
2839         } /* if hardwired */
2840
2841         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2842            asserted, hc_addr);
2843         REG_WR(bp, hc_addr, asserted);
2844
2845         /* now set back the mask */
2846         if (asserted & ATTN_NIG_FOR_FUNC) {
2847                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2848                 bnx2x_release_phy_lock(bp);
2849         }
2850 }
2851
2852 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2853 {
2854         int port = BP_PORT(bp);
2855
2856         /* mark the failure */
2857         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2858         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2859         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2860                  bp->link_params.ext_phy_config);
2861
2862         /* log the failure */
2863         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2864                " the driver to shut down the card to prevent permanent"
2865                " damage.  Please contact Dell Support for assistance\n",
2866                bp->dev->name);
2867 }

2868 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2869 {
2870         int port = BP_PORT(bp);
2871         int reg_offset;
2872         u32 val, swap_val, swap_override;
2873
2874         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2875                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2876
2877         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2878
2879                 val = REG_RD(bp, reg_offset);
2880                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2881                 REG_WR(bp, reg_offset, val);
2882
2883                 BNX2X_ERR("SPIO5 hw attention\n");
2884
2885                 /* Fan failure attention */
2886                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2887                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2888                         /* Low power mode is controlled by GPIO 2 */
2889                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2890                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2891                         /* The PHY reset is controlled by GPIO 1 */
2892                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2893                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2894                         break;
2895
2896                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2897                         /* The PHY reset is controlled by GPIO 1 */
2898                         /* fake the port number to cancel the swap done in
2899                            set_gpio() */
2900                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2901                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2902                         port = (swap_val && swap_override) ^ 1;
2903                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2904                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2905                         break;
2906
2907                 default:
2908                         break;
2909                 }
2910                 bnx2x_fan_failure(bp);
2911         }
2912
2913         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2914                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2915                 bnx2x_acquire_phy_lock(bp);
2916                 bnx2x_handle_module_detect_int(&bp->link_params);
2917                 bnx2x_release_phy_lock(bp);
2918         }
2919
2920         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2921
2922                 val = REG_RD(bp, reg_offset);
2923                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2924                 REG_WR(bp, reg_offset, val);
2925
2926                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2927                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2928                 bnx2x_panic();
2929         }
2930 }
2931
2932 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2933 {
2934         u32 val;
2935
2936         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2937
2938                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2939                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2940                 /* DORQ discard attention */
2941                 if (val & 0x2)
2942                         BNX2X_ERR("FATAL error from DORQ\n");
2943         }
2944
2945         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2946
2947                 int port = BP_PORT(bp);
2948                 int reg_offset;
2949
2950                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2951                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2952
2953                 val = REG_RD(bp, reg_offset);
2954                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2955                 REG_WR(bp, reg_offset, val);
2956
2957                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2958                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2959                 bnx2x_panic();
2960         }
2961 }
2962
2963 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2964 {
2965         u32 val;
2966
2967         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2968
2969                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2970                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2971                 /* CFC error attention */
2972                 if (val & 0x2)
2973                         BNX2X_ERR("FATAL error from CFC\n");
2974         }
2975
2976         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2977
2978                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2979                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2980                 /* RQ_USDMDP_FIFO_OVERFLOW */
2981                 if (val & 0x18000)
2982                         BNX2X_ERR("FATAL error from PXP\n");
2983         }
2984
2985         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2986
2987                 int port = BP_PORT(bp);
2988                 int reg_offset;
2989
2990                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2991                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2992
2993                 val = REG_RD(bp, reg_offset);
2994                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2995                 REG_WR(bp, reg_offset, val);
2996
2997                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2998                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2999                 bnx2x_panic();
3000         }
3001 }
3002
3003 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3004 {
3005         u32 val;
3006
3007         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3008
3009                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3010                         int func = BP_FUNC(bp);
3011
3012                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3013                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3014                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3015                                 bnx2x_dcc_event(bp,
3016                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3017                         bnx2x__link_status_update(bp);
3018                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3019                                 bnx2x_pmf_update(bp);
3020
3021                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3022
3023                         BNX2X_ERR("MC assert!\n");
3024                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3025                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3026                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3027                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3028                         bnx2x_panic();
3029
3030                 } else if (attn & BNX2X_MCP_ASSERT) {
3031
3032                         BNX2X_ERR("MCP assert!\n");
3033                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3034                         bnx2x_fw_dump(bp);
3035
3036                 } else
3037                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3038         }
3039
3040         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3041                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3042                 if (attn & BNX2X_GRC_TIMEOUT) {
3043                         val = CHIP_IS_E1H(bp) ?
3044                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3045                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3046                 }
3047                 if (attn & BNX2X_GRC_RSV) {
3048                         val = CHIP_IS_E1H(bp) ?
3049                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3050                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3051                 }
3052                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3053         }
3054 }
3055
3056 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3057 {
3058         struct attn_route attn;
3059         struct attn_route group_mask;
3060         int port = BP_PORT(bp);
3061         int index;
3062         u32 reg_addr;
3063         u32 val;
3064         u32 aeu_mask;
3065
3066         /* need to take HW lock because MCP or other port might also
3067            try to handle this event */
3068         bnx2x_acquire_alr(bp);
3069
3070         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3071         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3072         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3073         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3074         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3075            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3076
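        /* each deasserted bit selects one dynamic attention group; only the
         * signals enabled in that group's mask are handled below */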
3077         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3078                 if (deasserted & (1 << index)) {
3079                         group_mask = bp->attn_group[index];
3080
3081                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3082                            index, group_mask.sig[0], group_mask.sig[1],
3083                            group_mask.sig[2], group_mask.sig[3]);
3084
3085                         bnx2x_attn_int_deasserted3(bp,
3086                                         attn.sig[3] & group_mask.sig[3]);
3087                         bnx2x_attn_int_deasserted1(bp,
3088                                         attn.sig[1] & group_mask.sig[1]);
3089                         bnx2x_attn_int_deasserted2(bp,
3090                                         attn.sig[2] & group_mask.sig[2]);
3091                         bnx2x_attn_int_deasserted0(bp,
3092                                         attn.sig[0] & group_mask.sig[0]);
3093
3094                         if ((attn.sig[0] & group_mask.sig[0] &
3095                                                 HW_PRTY_ASSERT_SET_0) ||
3096                             (attn.sig[1] & group_mask.sig[1] &
3097                                                 HW_PRTY_ASSERT_SET_1) ||
3098                             (attn.sig[2] & group_mask.sig[2] &
3099                                                 HW_PRTY_ASSERT_SET_2))
3100                                 BNX2X_ERR("FATAL HW block parity attention\n");
3101                 }
3102         }
3103
3104         bnx2x_release_alr(bp);
3105
3106         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3107
3108         val = ~deasserted;
3109         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3110            val, reg_addr);
3111         REG_WR(bp, reg_addr, val);
3112
3113         if (~bp->attn_state & deasserted)
3114                 BNX2X_ERR("IGU ERROR\n");
3115
3116         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3117                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3118
3119         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3120         aeu_mask = REG_RD(bp, reg_addr);
3121
3122         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3123            aeu_mask, deasserted);
3124         aeu_mask |= (deasserted & 0xff);
3125         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3126
3127         REG_WR(bp, reg_addr, aeu_mask);
3128         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3129
3130         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3131         bp->attn_state &= ~deasserted;
3132         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3133 }
3134
3135 static void bnx2x_attn_int(struct bnx2x *bp)
3136 {
3137         /* read local copy of bits */
3138         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3139                                                                 attn_bits);
3140         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3141                                                                 attn_bits_ack);
3142         u32 attn_state = bp->attn_state;
3143
3144         /* look for changed bits */
3145         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3146         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3147
3148         DP(NETIF_MSG_HW,
3149            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3150            attn_bits, attn_ack, asserted, deasserted);
3151
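        /* while a change is in flight the IGU keeps attn_bits != attn_bits_ack;
         * if bits and ack agree but disagree with our saved state, an event
         * was missed */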
3152         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3153                 BNX2X_ERR("BAD attention state\n");
3154
3155         /* handle bits that were raised */
3156         if (asserted)
3157                 bnx2x_attn_int_asserted(bp, asserted);
3158
3159         if (deasserted)
3160                 bnx2x_attn_int_deasserted(bp, deasserted);
3161 }
3162
3163 static void bnx2x_sp_task(struct work_struct *work)
3164 {
3165         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3166         u16 status;
3167
3169         /* Return here if interrupt is disabled */
3170         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3171                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3172                 return;
3173         }
3174
3175         status = bnx2x_update_dsb_idx(bp);
3176 /*      if (status == 0)                                     */
3177 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3178
3179         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3180
3181         /* HW attentions */
3182         if (status & 0x1)
3183                 bnx2x_attn_int(bp);
3184
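        /* ack the updated default status block indices; only the last ack
         * re-enables the interrupt line (IGU_INT_ENABLE), the earlier ones
         * are plain index updates (IGU_INT_NOP) */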
3185         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3186                      IGU_INT_NOP, 1);
3187         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3188                      IGU_INT_NOP, 1);
3189         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3190                      IGU_INT_NOP, 1);
3191         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3192                      IGU_INT_NOP, 1);
3193         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3194                      IGU_INT_ENABLE, 1);
3196 }
3197
3198 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3199 {
3200         struct net_device *dev = dev_instance;
3201         struct bnx2x *bp = netdev_priv(dev);
3202
3203         /* Return here if interrupt is disabled */
3204         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3205                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3206                 return IRQ_HANDLED;
3207         }
3208
3209         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3210
3211 #ifdef BNX2X_STOP_ON_ERROR
3212         if (unlikely(bp->panic))
3213                 return IRQ_HANDLED;
3214 #endif
3215
3216         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3217
3218         return IRQ_HANDLED;
3219 }
3220
3221 /* end of slow path */
3222
3223 /* Statistics */
3224
3225 /****************************************************************************
3226 * Macros
3227 ****************************************************************************/
3228
3229 /* sum[hi:lo] += add[hi:lo] */
3230 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3231         do { \
3232                 s_lo += a_lo; \
3233                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3234         } while (0)
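
/* Worked example (illustrative sketch only, compiled out - not part of
 * the driver): adding 2 to the split pair 0x00000001:ffffffff wraps the
 * low word, and the (s_lo < a_lo) test detects the carry.
 */
#if 0
static void add_64_example(void)
{
	u32 s_hi = 0x00000001, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 2);
	/* s_lo wrapped to 0x00000001 (now < 2), so s_hi carried to 2 */
}
#endif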
3235
3236 /* difference = minuend - subtrahend */
3237 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3238         do { \
3239                 if (m_lo < s_lo) { \
3240                         /* underflow */ \
3241                         d_hi = m_hi - s_hi; \
3242                         if (d_hi > 0) { \
3243                                 /* we can 'loan' 1 */ \
3244                                 d_hi--; \
3245                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3246                         } else { \
3247                                 /* m_hi <= s_hi */ \
3248                                 d_hi = 0; \
3249                                 d_lo = 0; \
3250                         } \
3251                 } else { \
3252                         /* m_lo >= s_lo */ \
3253                         if (m_hi < s_hi) { \
3254                                 d_hi = 0; \
3255                                 d_lo = 0; \
3256                         } else { \
3257                                 /* m_hi >= s_hi */ \
3258                                 d_hi = m_hi - s_hi; \
3259                                 d_lo = m_lo - s_lo; \
3260                         } \
3261                 } \
3262         } while (0)
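
/* Borrow example (illustrative sketch only, compiled out - not part of
 * the driver): 0x0000000200000001 - 0x00000001ffffffff takes the
 * underflow path, loans 1 from the high word and yields 2; had the
 * minuend been the smaller value, the macro would clamp the result to
 * zero rather than wrap.
 */
#if 0
static void diff_64_example(void)
{
	u32 d_hi, d_lo;

	DIFF_64(d_hi, 0x00000002, 0x00000001,
		d_lo, 0x00000001, 0xffffffff);
	/* d_hi == 0, d_lo == 2 */
}
#endif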
3263
3264 #define UPDATE_STAT64(s, t) \
3265         do { \
3266                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3267                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3268                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3269                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3270                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3271                        pstats->mac_stx[1].t##_lo, diff.lo); \
3272         } while (0)
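
/* note: mac_stx[0] caches the latest raw MAC readout so the next delta can
 * be taken against it, while mac_stx[1] accumulates those deltas into a
 * monotonic total */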
3273
3274 #define UPDATE_STAT64_NIG(s, t) \
3275         do { \
3276                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3277                         diff.lo, new->s##_lo, old->s##_lo); \
3278                 ADD_64(estats->t##_hi, diff.hi, \
3279                        estats->t##_lo, diff.lo); \
3280         } while (0)
3281
3282 /* sum[hi:lo] += add */
3283 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3284         do { \
3285                 s_lo += a; \
3286                 s_hi += (s_lo < a) ? 1 : 0; \
3287         } while (0)
3288
3289 #define UPDATE_EXTEND_STAT(s) \
3290         do { \
3291                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3292                               pstats->mac_stx[1].s##_lo, \
3293                               new->s); \
3294         } while (0)
3295
3296 #define UPDATE_EXTEND_TSTAT(s, t) \
3297         do { \
3298                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3299                 old_tclient->s = tclient->s; \
3300                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3301         } while (0)
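
/* the delta is computed in u32 arithmetic, so a storm counter that wrapped
 * since the last readout still yields the correct (modular) difference */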
3302
3303 #define UPDATE_EXTEND_USTAT(s, t) \
3304         do { \
3305                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3306                 old_uclient->s = uclient->s; \
3307                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3308         } while (0)
3309
3310 #define UPDATE_EXTEND_XSTAT(s, t) \
3311         do { \
3312                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3313                 old_xclient->s = xclient->s; \
3314                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3315         } while (0)
3316
3317 /* minuend -= subtrahend */
3318 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3319         do { \
3320                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3321         } while (0)
3322
3323 /* minuend[hi:lo] -= subtrahend */
3324 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3325         do { \
3326                 SUB_64(m_hi, 0, m_lo, s); \
3327         } while (0)
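
/* like SUB_64, this reuses DIFF_64, so the result saturates at zero rather
 * than wrapping when the subtrahend exceeds the minuend */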
3328
3329 #define SUB_EXTEND_USTAT(s, t) \
3330         do { \
3331                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3332                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3333         } while (0)
3334
3335 /*
3336  * General service functions
3337  */
3338
3339 static inline long bnx2x_hilo(u32 *hiref)
3340 {
3341         u32 lo = *(hiref + 1);
3342 #if (BITS_PER_LONG == 64)
3343         u32 hi = *hiref;
3344
3345         return HILO_U64(hi, lo);
3346 #else
3347         return lo;
3348 #endif
3349 }
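
/* on 32-bit kernels the returned long carries only the low dword, so
 * callers see a truncated (wrapping) view of the 64-bit counter there */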
3350
3351 /*
3352  * Init service functions
3353  */
3354
3355 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3356 {
3357         if (!bp->stats_pending) {
3358                 struct eth_query_ramrod_data ramrod_data = {0};
3359                 int i, rc;
3360
3361                 ramrod_data.drv_counter = bp->stats_counter++;
3362                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3363                 for_each_queue(bp, i)
3364                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3365
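                /* the ramrod data is handed over by value as its two u32
                 * words (word 1, then word 0) rather than by pointer */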
3366                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3367                                    ((u32 *)&ramrod_data)[1],
3368                                    ((u32 *)&ramrod_data)[0], 0);
3369                 if (rc == 0) {
3370                         /* stats ramrod has its own slot on the spq */
3371                         bp->spq_left++;
3372                         bp->stats_pending = 1;
3373                 }
3374         }
3375 }
3376
3377 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3378 {
3379         struct dmae_command *dmae = &bp->stats_dmae;
3380         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3381
3382         *stats_comp = DMAE_COMP_VAL;
3383         if (CHIP_REV_IS_SLOW(bp))
3384                 return;
3385
3386         /* loader */
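        /* the loader DMAs the first prepared command from the slowpath
         * area into DMAE command-memory slot (loader_idx + 1); its
         * completion value is written to that slot's GO register,
         * which launches the copied command */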
3387         if (bp->executer_idx) {
3388                 int loader_idx = PMF_DMAE_C(bp);
3389
3390                 memset(dmae, 0, sizeof(struct dmae_command));
3391
3392                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3393                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3394                                 DMAE_CMD_DST_RESET |
3395 #ifdef __BIG_ENDIAN
3396                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3397 #else
3398                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3399 #endif
3400                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3401                                                DMAE_CMD_PORT_0) |
3402                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3404                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3405                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3406                                      sizeof(struct dmae_command) *
3407                                      (loader_idx + 1)) >> 2;
3408                 dmae->dst_addr_hi = 0;
3409                 dmae->len = sizeof(struct dmae_command) >> 2;
3410                 if (CHIP_IS_E1(bp))
3411                         dmae->len--;
3412                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3413                 dmae->comp_addr_hi = 0;
3414                 dmae->comp_val = 1;
3415
3416                 *stats_comp = 0;
3417                 bnx2x_post_dmae(bp, dmae, loader_idx);
3418
3419         } else if (bp->func_stx) {
3420                 *stats_comp = 0;
3421                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3422         }
3423 }
3424
3425 static int bnx2x_stats_comp(struct bnx2x *bp)
3426 {
3427         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3428         int cnt = 10;
3429
3430         might_sleep();
3431         while (*stats_comp != DMAE_COMP_VAL) {
3432                 if (!cnt) {
3433                         BNX2X_ERR("timeout waiting for stats to finish\n");
3434                         break;
3435                 }
3436                 cnt--;
3437                 msleep(1);
3438         }
3439         return 1;
3440 }
3441
3442 /*
3443  * Statistics service functions
3444  */
3445
3446 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3447 {
3448         struct dmae_command *dmae;
3449         u32 opcode;
3450         int loader_idx = PMF_DMAE_C(bp);
3451         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3452
3453         /* sanity */
3454         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3455                 BNX2X_ERR("BUG!\n");
3456                 return;
3457         }
3458
3459         bp->executer_idx = 0;
3460
3461         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3462                   DMAE_CMD_C_ENABLE |
3463                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3464 #ifdef __BIG_ENDIAN
3465                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3466 #else
3467                   DMAE_CMD_ENDIANITY_DW_SWAP |
3468 #endif
3469                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3470                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3471
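        /* host_port_stats is wider than one maximal DMAE read
         * (DMAE_LEN32_RD_MAX dwords), so it is fetched with two commands;
         * only the second one signals stats_comp */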
3472         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3473         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3474         dmae->src_addr_lo = bp->port.port_stx >> 2;
3475         dmae->src_addr_hi = 0;
3476         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3477         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3478         dmae->len = DMAE_LEN32_RD_MAX;
3479         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3480         dmae->comp_addr_hi = 0;
3481         dmae->comp_val = 1;
3482
3483         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3484         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3485         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3486         dmae->src_addr_hi = 0;
3487         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3488                                    DMAE_LEN32_RD_MAX * 4);
3489         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3490                                    DMAE_LEN32_RD_MAX * 4);
3491         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3492         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3493         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3494         dmae->comp_val = DMAE_COMP_VAL;
3495
3496         *stats_comp = 0;
3497         bnx2x_hw_stats_post(bp);
3498         bnx2x_stats_comp(bp);
3499 }
3500
3501 static void bnx2x_port_stats_init(struct bnx2x *bp)
3502 {
3503         struct dmae_command *dmae;
3504         int port = BP_PORT(bp);
3505         int vn = BP_E1HVN(bp);
3506         u32 opcode;
3507         int loader_idx = PMF_DMAE_C(bp);
3508         u32 mac_addr;
3509         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3510
3511         /* sanity */
3512         if (!bp->link_vars.link_up || !bp->port.pmf) {
3513                 BNX2X_ERR("BUG!\n");
3514                 return;
3515         }
3516
3517         bp->executer_idx = 0;
3518
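        /* build one DMAE program: push the host copies of the port and
         * function stats out to the MCP-visible addresses (port_stx,
         * func_stx), then pull the MAC and NIG hardware counters into host
         * memory; only the final command signals stats_comp */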
3519         /* MCP */
3520         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3521                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3522                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3523 #ifdef __BIG_ENDIAN
3524                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3525 #else
3526                   DMAE_CMD_ENDIANITY_DW_SWAP |
3527 #endif
3528                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3529                   (vn << DMAE_CMD_E1HVN_SHIFT));
3530
3531         if (bp->port.port_stx) {
3532
3533                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3534                 dmae->opcode = opcode;
3535                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3536                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3537                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3538                 dmae->dst_addr_hi = 0;
3539                 dmae->len = sizeof(struct host_port_stats) >> 2;
3540                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3541                 dmae->comp_addr_hi = 0;
3542                 dmae->comp_val = 1;
3543         }
3544
3545         if (bp->func_stx) {
3546
3547                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3548                 dmae->opcode = opcode;
3549                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3550                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3551                 dmae->dst_addr_lo = bp->func_stx >> 2;
3552                 dmae->dst_addr_hi = 0;
3553                 dmae->len = sizeof(struct host_func_stats) >> 2;
3554                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3555                 dmae->comp_addr_hi = 0;
3556                 dmae->comp_val = 1;
3557         }
3558
3559         /* MAC */
3560         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3561                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3562                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3563 #ifdef __BIG_ENDIAN
3564                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3565 #else
3566                   DMAE_CMD_ENDIANITY_DW_SWAP |
3567 #endif
3568                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3569                   (vn << DMAE_CMD_E1HVN_SHIFT));
3570
3571         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3572
3573                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3574                                    NIG_REG_INGRESS_BMAC0_MEM);
3575
3576                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3577                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3578                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3579                 dmae->opcode = opcode;
3580                 dmae->src_addr_lo = (mac_addr +
3581                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3582                 dmae->src_addr_hi = 0;
3583                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3584                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3585                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3586                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3587                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3588                 dmae->comp_addr_hi = 0;
3589                 dmae->comp_val = 1;
3590
3591                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3592                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3593                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3594                 dmae->opcode = opcode;
3595                 dmae->src_addr_lo = (mac_addr +
3596                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3597                 dmae->src_addr_hi = 0;
3598                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3599                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3600                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3601                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3602                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3603                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3604                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3605                 dmae->comp_addr_hi = 0;
3606                 dmae->comp_val = 1;
3607
3608         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3609
3610                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3611
3612                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3613                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3614                 dmae->opcode = opcode;
3615                 dmae->src_addr_lo = (mac_addr +
3616                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3617                 dmae->src_addr_hi = 0;
3618                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3619                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3620                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3621                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3622                 dmae->comp_addr_hi = 0;
3623                 dmae->comp_val = 1;
3624
3625                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3626                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3627                 dmae->opcode = opcode;
3628                 dmae->src_addr_lo = (mac_addr +
3629                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3630                 dmae->src_addr_hi = 0;
3631                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3632                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3633                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3634                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3635                 dmae->len = 1;
3636                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3637                 dmae->comp_addr_hi = 0;
3638                 dmae->comp_val = 1;
3639
3640                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3641                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642                 dmae->opcode = opcode;
3643                 dmae->src_addr_lo = (mac_addr +
3644                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3645                 dmae->src_addr_hi = 0;
3646                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3647                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3648                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3649                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3650                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3651                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3652                 dmae->comp_addr_hi = 0;
3653                 dmae->comp_val = 1;
3654         }
3655
3656         /* NIG */
3657         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3658         dmae->opcode = opcode;
3659         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3660                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3661         dmae->src_addr_hi = 0;
3662         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3663         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3664         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3665         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3666         dmae->comp_addr_hi = 0;
3667         dmae->comp_val = 1;
3668
3669         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3670         dmae->opcode = opcode;
3671         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3672                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3673         dmae->src_addr_hi = 0;
3674         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3675                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3676         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3677                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3678         dmae->len = (2*sizeof(u32)) >> 2;
3679         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680         dmae->comp_addr_hi = 0;
3681         dmae->comp_val = 1;
3682
3683         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3684         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3685                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3686                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3687 #ifdef __BIG_ENDIAN
3688                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3689 #else
3690                         DMAE_CMD_ENDIANITY_DW_SWAP |
3691 #endif
3692                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3693                         (vn << DMAE_CMD_E1HVN_SHIFT));
3694         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3695                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3696         dmae->src_addr_hi = 0;
3697         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3698                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3699         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3700                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3701         dmae->len = (2*sizeof(u32)) >> 2;
3702         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3703         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3704         dmae->comp_val = DMAE_COMP_VAL;
3705
3706         *stats_comp = 0;
3707 }
3708
3709 static void bnx2x_func_stats_init(struct bnx2x *bp)
3710 {
3711         struct dmae_command *dmae = &bp->stats_dmae;
3712         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3713
3714         /* sanity */
3715         if (!bp->func_stx) {
3716                 BNX2X_ERR("BUG!\n");
3717                 return;
3718         }
3719
3720         bp->executer_idx = 0;
3721         memset(dmae, 0, sizeof(struct dmae_command));
3722
3723         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3724                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3725                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3726 #ifdef __BIG_ENDIAN
3727                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3728 #else
3729                         DMAE_CMD_ENDIANITY_DW_SWAP |
3730 #endif
3731                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3732                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3733         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3734         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3735         dmae->dst_addr_lo = bp->func_stx >> 2;
3736         dmae->dst_addr_hi = 0;
3737         dmae->len = sizeof(struct host_func_stats) >> 2;
3738         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3739         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3740         dmae->comp_val = DMAE_COMP_VAL;
3741
3742         *stats_comp = 0;
3743 }
3744
3745 static void bnx2x_stats_start(struct bnx2x *bp)
3746 {
3747         if (bp->port.pmf)
3748                 bnx2x_port_stats_init(bp);
3749
3750         else if (bp->func_stx)
3751                 bnx2x_func_stats_init(bp);
3752
3753         bnx2x_hw_stats_post(bp);
3754         bnx2x_storm_stats_post(bp);
3755 }
3756
3757 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3758 {
3759         bnx2x_stats_comp(bp);
3760         bnx2x_stats_pmf_update(bp);
3761         bnx2x_stats_start(bp);
3762 }
3763
3764 static void bnx2x_stats_restart(struct bnx2x *bp)
3765 {
3766         bnx2x_stats_comp(bp);
3767         bnx2x_stats_start(bp);
3768 }
3769
3770 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3771 {
3772         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3773         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3774         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3775         struct {
3776                 u32 lo;
3777                 u32 hi;
3778         } diff;
3779
3780         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3781         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3782         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3783         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3784         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3785         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3786         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3787         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3788         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3789         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3790         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3791         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3792         UPDATE_STAT64(tx_stat_gt127,
3793                                 tx_stat_etherstatspkts65octetsto127octets);
3794         UPDATE_STAT64(tx_stat_gt255,
3795                                 tx_stat_etherstatspkts128octetsto255octets);
3796         UPDATE_STAT64(tx_stat_gt511,
3797                                 tx_stat_etherstatspkts256octetsto511octets);
3798         UPDATE_STAT64(tx_stat_gt1023,
3799                                 tx_stat_etherstatspkts512octetsto1023octets);
3800         UPDATE_STAT64(tx_stat_gt1518,
3801                                 tx_stat_etherstatspkts1024octetsto1522octets);
3802         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3803         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3804         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3805         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3806         UPDATE_STAT64(tx_stat_gterr,
3807                                 tx_stat_dot3statsinternalmactransmiterrors);
3808         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3809
3810         estats->pause_frames_received_hi =
3811                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3812         estats->pause_frames_received_lo =
3813                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3814
3815         estats->pause_frames_sent_hi =
3816                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3817         estats->pause_frames_sent_lo =
3818                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3819 }
3820
3821 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3822 {
3823         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3824         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3825         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3826
3827         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3828         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3829         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3830         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3831         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3832         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3833         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3834         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3835         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3836         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3837         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3838         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3839         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3840         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3841         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3842         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3843         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3844         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3845         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3846         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3847         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3848         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3849         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3850         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3851         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3852         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3853         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3854         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3855         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3856         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3857         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3858
3859         estats->pause_frames_received_hi =
3860                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3861         estats->pause_frames_received_lo =
3862                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3863         ADD_64(estats->pause_frames_received_hi,
3864                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3865                estats->pause_frames_received_lo,
3866                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3867
3868         estats->pause_frames_sent_hi =
3869                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3870         estats->pause_frames_sent_lo =
3871                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3872         ADD_64(estats->pause_frames_sent_hi,
3873                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3874                estats->pause_frames_sent_lo,
3875                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3876 }
3877
3878 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3879 {
3880         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3881         struct nig_stats *old = &(bp->port.old_nig_stats);
3882         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3883         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3884         struct {
3885                 u32 lo;
3886                 u32 hi;
3887         } diff;
3888         u32 nig_timer_max;
3889
3890         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3891                 bnx2x_bmac_stats_update(bp);
3892
3893         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3894                 bnx2x_emac_stats_update(bp);
3895
3896         else { /* unreached */
3897                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3898                 return -1;
3899         }
3900
3901         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3902                       new->brb_discard - old->brb_discard);
3903         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3904                       new->brb_truncate - old->brb_truncate);
3905
3906         UPDATE_STAT64_NIG(egress_mac_pkt0,
3907                                         etherstatspkts1024octetsto1522octets);
3908         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3909
3910         memcpy(old, new, sizeof(struct nig_stats));
3911
3912         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3913                sizeof(struct mac_stx));
3914         estats->brb_drop_hi = pstats->brb_drop_hi;
3915         estats->brb_drop_lo = pstats->brb_drop_lo;
3916
3917         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3918
3919         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3920         if (nig_timer_max != estats->nig_timer_max) {
3921                 estats->nig_timer_max = nig_timer_max;
3922                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3923         }
3924
3925         return 0;
3926 }
3927
3928 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3929 {
3930         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3931         struct tstorm_per_port_stats *tport =
3932                                         &stats->tstorm_common.port_statistics;
3933         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3934         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3935         int i;
3936
3937         memcpy(&(fstats->total_bytes_received_hi),
3938                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3939                sizeof(struct host_func_stats) - 2*sizeof(u32));
3940         estats->error_bytes_received_hi = 0;
3941         estats->error_bytes_received_lo = 0;
3942         estats->etherstatsoverrsizepkts_hi = 0;
3943         estats->etherstatsoverrsizepkts_lo = 0;
3944         estats->no_buff_discard_hi = 0;
3945         estats->no_buff_discard_lo = 0;
3946
3947         for_each_rx_queue(bp, i) {
3948                 struct bnx2x_fastpath *fp = &bp->fp[i];
3949                 int cl_id = fp->cl_id;
3950                 struct tstorm_per_client_stats *tclient =
3951                                 &stats->tstorm_common.client_statistics[cl_id];
3952                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3953                 struct ustorm_per_client_stats *uclient =
3954                                 &stats->ustorm_common.client_statistics[cl_id];
3955                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3956                 struct xstorm_per_client_stats *xclient =
3957                                 &stats->xstorm_common.client_statistics[cl_id];
3958                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3959                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3960                 u32 diff;
3961
3962                 /* are storm stats valid? each storm echoes the drv_counter
                      * it was handed, which is one behind bp->stats_counter */
3963                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3964                                                         bp->stats_counter) {
3965                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3966                            "  xstorm counter (%d) != stats_counter (%d)\n",
3967                            i, xclient->stats_counter, bp->stats_counter);
3968                         return -1;
3969                 }
3970                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3971                                                         bp->stats_counter) {
3972                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3973                            "  tstorm counter (%d) != stats_counter (%d)\n",
3974                            i, tclient->stats_counter, bp->stats_counter);
3975                         return -2;
3976                 }
3977                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3978                                                         bp->stats_counter) {
3979                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3980                            "  ustorm counter (%d) != stats_counter (%d)\n",
3981                            i, uclient->stats_counter, bp->stats_counter);
3982                         return -4;
3983                 }
3984
3985                 qstats->total_bytes_received_hi =
3986                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3987                 qstats->total_bytes_received_lo =
3988                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3989
3990                 ADD_64(qstats->total_bytes_received_hi,
3991                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3992                        qstats->total_bytes_received_lo,
3993                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3994
3995                 ADD_64(qstats->total_bytes_received_hi,
3996                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3997                        qstats->total_bytes_received_lo,
3998                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3999
4000                 qstats->valid_bytes_received_hi =
4001                                         qstats->total_bytes_received_hi;
4002                 qstats->valid_bytes_received_lo =
4003                                         qstats->total_bytes_received_lo;
4004
4005                 qstats->error_bytes_received_hi =
4006                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4007                 qstats->error_bytes_received_lo =
4008                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4009
4010                 ADD_64(qstats->total_bytes_received_hi,
4011                        qstats->error_bytes_received_hi,
4012                        qstats->total_bytes_received_lo,
4013                        qstats->error_bytes_received_lo);
4014
4015                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4016                                         total_unicast_packets_received);
4017                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4018                                         total_multicast_packets_received);
4019                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4020                                         total_broadcast_packets_received);
4021                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4022                                         etherstatsoverrsizepkts);
4023                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4024
4025                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4026                                         total_unicast_packets_received);
4027                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4028                                         total_multicast_packets_received);
4029                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4030                                         total_broadcast_packets_received);
4031                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4032                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4033                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4034
4035                 qstats->total_bytes_transmitted_hi =
4036                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4037                 qstats->total_bytes_transmitted_lo =
4038                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4039
4040                 ADD_64(qstats->total_bytes_transmitted_hi,
4041                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4042                        qstats->total_bytes_transmitted_lo,
4043                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4044
4045                 ADD_64(qstats->total_bytes_transmitted_hi,
4046                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4047                        qstats->total_bytes_transmitted_lo,
4048                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4049
4050                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4051                                         total_unicast_packets_transmitted);
4052                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4053                                         total_multicast_packets_transmitted);
4054                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4055                                         total_broadcast_packets_transmitted);
4056
4057                 old_tclient->checksum_discard = tclient->checksum_discard;
4058                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4059
4060                 ADD_64(fstats->total_bytes_received_hi,
4061                        qstats->total_bytes_received_hi,
4062                        fstats->total_bytes_received_lo,
4063                        qstats->total_bytes_received_lo);
4064                 ADD_64(fstats->total_bytes_transmitted_hi,
4065                        qstats->total_bytes_transmitted_hi,
4066                        fstats->total_bytes_transmitted_lo,
4067                        qstats->total_bytes_transmitted_lo);
4068                 ADD_64(fstats->total_unicast_packets_received_hi,
4069                        qstats->total_unicast_packets_received_hi,
4070                        fstats->total_unicast_packets_received_lo,
4071                        qstats->total_unicast_packets_received_lo);
4072                 ADD_64(fstats->total_multicast_packets_received_hi,
4073                        qstats->total_multicast_packets_received_hi,
4074                        fstats->total_multicast_packets_received_lo,
4075                        qstats->total_multicast_packets_received_lo);
4076                 ADD_64(fstats->total_broadcast_packets_received_hi,
4077                        qstats->total_broadcast_packets_received_hi,
4078                        fstats->total_broadcast_packets_received_lo,
4079                        qstats->total_broadcast_packets_received_lo);
4080                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4081                        qstats->total_unicast_packets_transmitted_hi,
4082                        fstats->total_unicast_packets_transmitted_lo,
4083                        qstats->total_unicast_packets_transmitted_lo);
4084                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4085                        qstats->total_multicast_packets_transmitted_hi,
4086                        fstats->total_multicast_packets_transmitted_lo,
4087                        qstats->total_multicast_packets_transmitted_lo);
4088                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4089                        qstats->total_broadcast_packets_transmitted_hi,
4090                        fstats->total_broadcast_packets_transmitted_lo,
4091                        qstats->total_broadcast_packets_transmitted_lo);
4092                 ADD_64(fstats->valid_bytes_received_hi,
4093                        qstats->valid_bytes_received_hi,
4094                        fstats->valid_bytes_received_lo,
4095                        qstats->valid_bytes_received_lo);
4096
4097                 ADD_64(estats->error_bytes_received_hi,
4098                        qstats->error_bytes_received_hi,
4099                        estats->error_bytes_received_lo,
4100                        qstats->error_bytes_received_lo);
4101                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4102                        qstats->etherstatsoverrsizepkts_hi,
4103                        estats->etherstatsoverrsizepkts_lo,
4104                        qstats->etherstatsoverrsizepkts_lo);
4105                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4106                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4107         }
4108
4109         ADD_64(fstats->total_bytes_received_hi,
4110                estats->rx_stat_ifhcinbadoctets_hi,
4111                fstats->total_bytes_received_lo,
4112                estats->rx_stat_ifhcinbadoctets_lo);
4113
4114         memcpy(estats, &(fstats->total_bytes_received_hi),
4115                sizeof(struct host_func_stats) - 2*sizeof(u32));
4116
4117         ADD_64(estats->etherstatsoverrsizepkts_hi,
4118                estats->rx_stat_dot3statsframestoolong_hi,
4119                estats->etherstatsoverrsizepkts_lo,
4120                estats->rx_stat_dot3statsframestoolong_lo);
4121         ADD_64(estats->error_bytes_received_hi,
4122                estats->rx_stat_ifhcinbadoctets_hi,
4123                estats->error_bytes_received_lo,
4124                estats->rx_stat_ifhcinbadoctets_lo);
4125
4126         if (bp->port.pmf) {
4127                 estats->mac_filter_discard =
4128                                 le32_to_cpu(tport->mac_filter_discard);
4129                 estats->xxoverflow_discard =
4130                                 le32_to_cpu(tport->xxoverflow_discard);
4131                 estats->brb_truncate_discard =
4132                                 le32_to_cpu(tport->brb_truncate_discard);
4133                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4134         }
4135
4136         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4137
4138         bp->stats_pending = 0;
4139
4140         return 0;
4141 }
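/* All of these counters are maintained as hi/lo pairs of u32 words rather
 * than native u64s.  ADD_64() (from bnx2x.h) adds one such pair into
 * another, propagating the carry out of the low word.  A minimal sketch of
 * the idea, assuming the hi/lo layout used throughout this file:
 */
#if 0	/* illustrative sketch only, not part of the driver */
#define ADD_64_SKETCH(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += (a_lo); \
		if (s_lo < (a_lo))	/* low word wrapped: carry into hi */ \
			s_hi++; \
		s_hi += (a_hi); \
	} while (0)
#endif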
4142
4143 static void bnx2x_net_stats_update(struct bnx2x *bp)
4144 {
4145         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4146         struct net_device_stats *nstats = &bp->dev->stats;
4147         int i;
4148
4149         nstats->rx_packets =
4150                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4151                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4152                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4153
4154         nstats->tx_packets =
4155                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4156                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4157                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4158
4159         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4160
4161         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4162
4163         nstats->rx_dropped = estats->mac_discard;
4164         for_each_rx_queue(bp, i)
4165                 nstats->rx_dropped +=
4166                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4167
4168         nstats->tx_dropped = 0;
4169
4170         nstats->multicast =
4171                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4172
4173         nstats->collisions =
4174                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4175
4176         nstats->rx_length_errors =
4177                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4178                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4179         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4180                                  bnx2x_hilo(&estats->brb_truncate_hi);
4181         nstats->rx_crc_errors =
4182                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4183         nstats->rx_frame_errors =
4184                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4185         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4186         nstats->rx_missed_errors = estats->xxoverflow_discard;
4187
4188         nstats->rx_errors = nstats->rx_length_errors +
4189                             nstats->rx_over_errors +
4190                             nstats->rx_crc_errors +
4191                             nstats->rx_frame_errors +
4192                             nstats->rx_fifo_errors +
4193                             nstats->rx_missed_errors;
4194
4195         nstats->tx_aborted_errors =
4196                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4197                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4198         nstats->tx_carrier_errors =
4199                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4200         nstats->tx_fifo_errors = 0;
4201         nstats->tx_heartbeat_errors = 0;
4202         nstats->tx_window_errors = 0;
4203
4204         nstats->tx_errors = nstats->tx_aborted_errors +
4205                             nstats->tx_carrier_errors +
4206             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4207 }
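/* bnx2x_hilo() collapses such a hi/lo counter pair into one value for the
 * net_device_stats fields above.  A sketch of the intended semantics on a
 * 64-bit build (on 32-bit, only the low word fits in an unsigned long):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static inline u64 hilo_to_u64(const u32 *hi_ref)
{
	/* the low word is laid out immediately after the high word */
	return ((u64)hi_ref[0] << 32) | hi_ref[1];
}
#endif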
4208
4209 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4210 {
4211         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4212         int i;
4213
4214         estats->driver_xoff = 0;
4215         estats->rx_err_discard_pkt = 0;
4216         estats->rx_skb_alloc_failed = 0;
4217         estats->hw_csum_err = 0;
4218         for_each_rx_queue(bp, i) {
4219                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4220
4221                 estats->driver_xoff += qstats->driver_xoff;
4222                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4223                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4224                 estats->hw_csum_err += qstats->hw_csum_err;
4225         }
4226 }
4227
4228 static void bnx2x_stats_update(struct bnx2x *bp)
4229 {
4230         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4231
4232         if (*stats_comp != DMAE_COMP_VAL)
4233                 return;
4234
4235         if (bp->port.pmf)
4236                 bnx2x_hw_stats_update(bp);
4237
4238         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4239                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4240                 bnx2x_panic();
4241                 return;
4242         }
4243
4244         bnx2x_net_stats_update(bp);
4245         bnx2x_drv_stats_update(bp);
4246
4247         if (bp->msglevel & NETIF_MSG_TIMER) {
4248                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4249                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4250                 struct tstorm_per_client_stats *old_tclient =
4251                                                         &bp->fp->old_tclient;
4252                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4253                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4254                 struct net_device_stats *nstats = &bp->dev->stats;
4255                 int i;
4256
4257                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4258                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4259                                   "  tx pkt (%lx)\n",
4260                        bnx2x_tx_avail(fp0_tx),
4261                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4262                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4263                                   "  rx pkt (%lx)\n",
4264                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4265                              fp0_rx->rx_comp_cons),
4266                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4267                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4268                                   "brb truncate %u\n",
4269                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4270                        qstats->driver_xoff,
4271                        estats->brb_drop_lo, estats->brb_truncate_lo);
4272                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4273                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4274                         "mac_discard %u  mac_filter_discard %u  "
4275                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4276                         "ttl0_discard %u\n",
4277                        le32_to_cpu(old_tclient->checksum_discard),
4278                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4279                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4280                        estats->mac_discard, estats->mac_filter_discard,
4281                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4282                        le32_to_cpu(old_tclient->ttl0_discard));
4283
4284                 for_each_queue(bp, i) {
4285                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4286                                bnx2x_fp(bp, i, tx_pkt),
4287                                bnx2x_fp(bp, i, rx_pkt),
4288                                bnx2x_fp(bp, i, rx_calls));
4289                 }
4290         }
4291
4292         bnx2x_hw_stats_post(bp);
4293         bnx2x_storm_stats_post(bp);
4294 }
4295
4296 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4297 {
4298         struct dmae_command *dmae;
4299         u32 opcode;
4300         int loader_idx = PMF_DMAE_C(bp);
4301         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4302
4303         bp->executer_idx = 0;
4304
4305         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4306                   DMAE_CMD_C_ENABLE |
4307                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4308 #ifdef __BIG_ENDIAN
4309                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4310 #else
4311                   DMAE_CMD_ENDIANITY_DW_SWAP |
4312 #endif
4313                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4314                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4315
4316         if (bp->port.port_stx) {
4317
4318                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4319                 if (bp->func_stx)
4320                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4321                 else
4322                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4323                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4324                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4325                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4326                 dmae->dst_addr_hi = 0;
4327                 dmae->len = sizeof(struct host_port_stats) >> 2;
4328                 if (bp->func_stx) {
4329                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4330                         dmae->comp_addr_hi = 0;
4331                         dmae->comp_val = 1;
4332                 } else {
4333                         dmae->comp_addr_lo =
4334                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4335                         dmae->comp_addr_hi =
4336                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4337                         dmae->comp_val = DMAE_COMP_VAL;
4338
4339                         *stats_comp = 0;
4340                 }
4341         }
4342
4343         if (bp->func_stx) {
4344
4345                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4346                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4347                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4348                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4349                 dmae->dst_addr_lo = bp->func_stx >> 2;
4350                 dmae->dst_addr_hi = 0;
4351                 dmae->len = sizeof(struct host_func_stats) >> 2;
4352                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4353                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4354                 dmae->comp_val = DMAE_COMP_VAL;
4355
4356                 *stats_comp = 0;
4357         }
4358 }
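/* Both command blocks above follow the DMAE recipe used for all statistics
 * transfers in this file: fill a struct dmae_command (opcode, source,
 * destination, length in dwords, completion address/value), then post the
 * commands and poll for completion.  A condensed sketch of the sequence:
 */
#if 0	/* illustrative sketch only, not part of the driver */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	/* ... fill opcode/src/dst/len as above ... */
	dmae->comp_val = DMAE_COMP_VAL;
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);	/* kick the DMAE engine */
	bnx2x_stats_comp(bp);		/* wait for *stats_comp == DMAE_COMP_VAL */
#endif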
4359
4360 static void bnx2x_stats_stop(struct bnx2x *bp)
4361 {
4362         int update = 0;
4363
4364         bnx2x_stats_comp(bp);
4365
4366         if (bp->port.pmf)
4367                 update = (bnx2x_hw_stats_update(bp) == 0);
4368
4369         update |= (bnx2x_storm_stats_update(bp) == 0);
4370
4371         if (update) {
4372                 bnx2x_net_stats_update(bp);
4373
4374                 if (bp->port.pmf)
4375                         bnx2x_port_stats_stop(bp);
4376
4377                 bnx2x_hw_stats_post(bp);
4378                 bnx2x_stats_comp(bp);
4379         }
4380 }
4381
4382 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4383 {
4384 }
4385
4386 static const struct {
4387         void (*action)(struct bnx2x *bp);
4388         enum bnx2x_stats_state next_state;
4389 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4390 /* state        event   */
4391 {
4392 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4393 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4394 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4395 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4396 },
4397 {
4398 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4399 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4400 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4401 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4402 }
4403 };
4404
4405 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4406 {
4407         enum bnx2x_stats_state state = bp->stats_state;
4408
4409         bnx2x_stats_stm[state][event].action(bp);
4410         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4411
4412         /* Make sure the state has been "changed" */
4413         smp_wmb();
4414
4415         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4416                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4417                    state, event, bp->stats_state);
4418 }
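/* The statistics engine is a small table-driven state machine: each
 * (state, event) cell of bnx2x_stats_stm names an action and the next
 * state.  For example, a link-up event while collection is disabled:
 */
#if 0	/* illustrative usage sketch, not part of the driver */
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	/* runs bnx2x_stats_start(bp) and leaves
	 * bp->stats_state == STATS_STATE_ENABLED */
#endif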
4419
4420 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4421 {
4422         struct dmae_command *dmae;
4423         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4424
4425         /* sanity */
4426         if (!bp->port.pmf || !bp->port.port_stx) {
4427                 BNX2X_ERR("BUG!\n");
4428                 return;
4429         }
4430
4431         bp->executer_idx = 0;
4432
4433         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4434         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4435                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4436                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4437 #ifdef __BIG_ENDIAN
4438                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4439 #else
4440                         DMAE_CMD_ENDIANITY_DW_SWAP |
4441 #endif
4442                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4443                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4444         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4445         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4446         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4447         dmae->dst_addr_hi = 0;
4448         dmae->len = sizeof(struct host_port_stats) >> 2;
4449         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4450         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4451         dmae->comp_val = DMAE_COMP_VAL;
4452
4453         *stats_comp = 0;
4454         bnx2x_hw_stats_post(bp);
4455         bnx2x_stats_comp(bp);
4456 }
4457
4458 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4459 {
4460         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4461         int port = BP_PORT(bp);
4462         int func;
4463         u32 func_stx;
4464
4465         /* sanity */
4466         if (!bp->port.pmf || !bp->func_stx) {
4467                 BNX2X_ERR("BUG!\n");
4468                 return;
4469         }
4470
4471         /* save our func_stx */
4472         func_stx = bp->func_stx;
4473
4474         for (vn = VN_0; vn < vn_max; vn++) {
4475                 func = 2*vn + port;
4476
4477                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4478                 bnx2x_func_stats_init(bp);
4479                 bnx2x_hw_stats_post(bp);
4480                 bnx2x_stats_comp(bp);
4481         }
4482
4483         /* restore our func_stx */
4484         bp->func_stx = func_stx;
4485 }
4486
4487 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4488 {
4489         struct dmae_command *dmae = &bp->stats_dmae;
4490         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4491
4492         /* sanity */
4493         if (!bp->func_stx) {
4494                 BNX2X_ERR("BUG!\n");
4495                 return;
4496         }
4497
4498         bp->executer_idx = 0;
4499         memset(dmae, 0, sizeof(struct dmae_command));
4500
4501         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4502                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4503                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4504 #ifdef __BIG_ENDIAN
4505                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4506 #else
4507                         DMAE_CMD_ENDIANITY_DW_SWAP |
4508 #endif
4509                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4510                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4511         dmae->src_addr_lo = bp->func_stx >> 2;
4512         dmae->src_addr_hi = 0;
4513         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4514         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4515         dmae->len = sizeof(struct host_func_stats) >> 2;
4516         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4517         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4518         dmae->comp_val = DMAE_COMP_VAL;
4519
4520         *stats_comp = 0;
4521         bnx2x_hw_stats_post(bp);
4522         bnx2x_stats_comp(bp);
4523 }
4524
4525 static void bnx2x_stats_init(struct bnx2x *bp)
4526 {
4527         int port = BP_PORT(bp);
4528         int func = BP_FUNC(bp);
4529         int i;
4530
4531         bp->stats_pending = 0;
4532         bp->executer_idx = 0;
4533         bp->stats_counter = 0;
4534
4535         /* port and func stats for management */
4536         if (!BP_NOMCP(bp)) {
4537                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4538                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4539
4540         } else {
4541                 bp->port.port_stx = 0;
4542                 bp->func_stx = 0;
4543         }
4544         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4545            bp->port.port_stx, bp->func_stx);
4546
4547         /* port stats */
4548         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4549         bp->port.old_nig_stats.brb_discard =
4550                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4551         bp->port.old_nig_stats.brb_truncate =
4552                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4553         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4554                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4555         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4556                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4557
4558         /* function stats */
4559         for_each_queue(bp, i) {
4560                 struct bnx2x_fastpath *fp = &bp->fp[i];
4561
4562                 memset(&fp->old_tclient, 0,
4563                        sizeof(struct tstorm_per_client_stats));
4564                 memset(&fp->old_uclient, 0,
4565                        sizeof(struct ustorm_per_client_stats));
4566                 memset(&fp->old_xclient, 0,
4567                        sizeof(struct xstorm_per_client_stats));
4568                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4569         }
4570
4571         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4572         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4573
4574         bp->stats_state = STATS_STATE_DISABLED;
4575
4576         if (bp->port.pmf) {
4577                 if (bp->port.port_stx)
4578                         bnx2x_port_stats_base_init(bp);
4579
4580                 if (bp->func_stx)
4581                         bnx2x_func_stats_base_init(bp);
4582
4583         } else if (bp->func_stx)
4584                 bnx2x_func_stats_base_update(bp);
4585 }
4586
4587 static void bnx2x_timer(unsigned long data)
4588 {
4589         struct bnx2x *bp = (struct bnx2x *) data;
4590
4591         if (!netif_running(bp->dev))
4592                 return;
4593
4594         if (atomic_read(&bp->intr_sem) != 0)
4595                 goto timer_restart;
4596
4597         if (poll) {
4598                 struct bnx2x_fastpath *fp = &bp->fp[0];
4599                 int rc;
4600
4601                 bnx2x_tx_int(fp);
4602                 rc = bnx2x_rx_int(fp, 1000);
4603         }
4604
4605         if (!BP_NOMCP(bp)) {
4606                 int func = BP_FUNC(bp);
4607                 u32 drv_pulse;
4608                 u32 mcp_pulse;
4609
4610                 ++bp->fw_drv_pulse_wr_seq;
4611                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4612                 /* TBD - add SYSTEM_TIME */
4613                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4614                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4615
4616                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4617                              MCP_PULSE_SEQ_MASK);
4618                 /* The delta between driver pulse and mcp response
4619                  * should be 1 (before mcp response) or 0 (after mcp response)
4620                  */
4621                 if ((drv_pulse != mcp_pulse) &&
4622                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4623                         /* someone lost a heartbeat... */
4624                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4625                                   drv_pulse, mcp_pulse);
4626                 }
4627         }
4628
4629         if ((bp->state == BNX2X_STATE_OPEN) ||
4630             (bp->state == BNX2X_STATE_DISABLED))
4631                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4632
4633 timer_restart:
4634         mod_timer(&bp->timer, jiffies + bp->current_interval);
4635 }
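/* Worked example of the heartbeat check above: if drv_pulse is 0x5, the
 * healthy mcp_pulse values are 0x5 (MCP already answered this beat, delta
 * of 0) and 0x4 (answer still pending, delta of 1); any other value means
 * a beat was lost and the mismatch is logged.  Values are illustrative.
 */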
4636
4637 /* end of Statistics */
4638
4639 /* nic init */
4640
4641 /*
4642  * nic init service functions
4643  */
4644
4645 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4646 {
4647         int port = BP_PORT(bp);
4648
4649         /* "CSTORM" */
4650         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4651                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4652                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4653         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4654                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4655                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4656 }
4657
4658 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4659                           dma_addr_t mapping, int sb_id)
4660 {
4661         int port = BP_PORT(bp);
4662         int func = BP_FUNC(bp);
4663         int index;
4664         u64 section;
4665
4666         /* USTORM */
4667         section = ((u64)mapping) + offsetof(struct host_status_block,
4668                                             u_status_block);
4669         sb->u_status_block.status_block_id = sb_id;
4670
4671         REG_WR(bp, BAR_CSTRORM_INTMEM +
4672                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4673         REG_WR(bp, BAR_CSTRORM_INTMEM +
4674                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4675                U64_HI(section));
4676         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4677                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4678
4679         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4680                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4681                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4682
4683         /* CSTORM */
4684         section = ((u64)mapping) + offsetof(struct host_status_block,
4685                                             c_status_block);
4686         sb->c_status_block.status_block_id = sb_id;
4687
4688         REG_WR(bp, BAR_CSTRORM_INTMEM +
4689                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4690         REG_WR(bp, BAR_CSTRORM_INTMEM +
4691                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4692                U64_HI(section));
4693         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4694                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4695
4696         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4697                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4698                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4699
4700         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4701 }
4702
4703 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4704 {
4705         int func = BP_FUNC(bp);
4706
4707         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4708                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4709                         sizeof(struct tstorm_def_status_block)/4);
4710         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4711                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4712                         sizeof(struct cstorm_def_status_block_u)/4);
4713         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4714                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4715                         sizeof(struct cstorm_def_status_block_c)/4);
4716         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4717                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4718                         sizeof(struct xstorm_def_status_block)/4);
4719 }
4720
4721 static void bnx2x_init_def_sb(struct bnx2x *bp,
4722                               struct host_def_status_block *def_sb,
4723                               dma_addr_t mapping, int sb_id)
4724 {
4725         int port = BP_PORT(bp);
4726         int func = BP_FUNC(bp);
4727         int index, val, reg_offset;
4728         u64 section;
4729
4730         /* ATTN */
4731         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4732                                             atten_status_block);
4733         def_sb->atten_status_block.status_block_id = sb_id;
4734
4735         bp->attn_state = 0;
4736
4737         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4738                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4739
4740         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4741                 bp->attn_group[index].sig[0] = REG_RD(bp,
4742                                                      reg_offset + 0x10*index);
4743                 bp->attn_group[index].sig[1] = REG_RD(bp,
4744                                                reg_offset + 0x4 + 0x10*index);
4745                 bp->attn_group[index].sig[2] = REG_RD(bp,
4746                                                reg_offset + 0x8 + 0x10*index);
4747                 bp->attn_group[index].sig[3] = REG_RD(bp,
4748                                                reg_offset + 0xc + 0x10*index);
4749         }
4750
4751         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4752                              HC_REG_ATTN_MSG0_ADDR_L);
4753
4754         REG_WR(bp, reg_offset, U64_LO(section));
4755         REG_WR(bp, reg_offset + 4, U64_HI(section));
4756
4757         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4758
4759         val = REG_RD(bp, reg_offset);
4760         val |= sb_id;
4761         REG_WR(bp, reg_offset, val);
4762
4763         /* USTORM */
4764         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4765                                             u_def_status_block);
4766         def_sb->u_def_status_block.status_block_id = sb_id;
4767
4768         REG_WR(bp, BAR_CSTRORM_INTMEM +
4769                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4770         REG_WR(bp, BAR_CSTRORM_INTMEM +
4771                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4772                U64_HI(section));
4773         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4774                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4775
4776         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4777                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4778                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4779
4780         /* CSTORM */
4781         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4782                                             c_def_status_block);
4783         def_sb->c_def_status_block.status_block_id = sb_id;
4784
4785         REG_WR(bp, BAR_CSTRORM_INTMEM +
4786                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4787         REG_WR(bp, BAR_CSTRORM_INTMEM +
4788                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4789                U64_HI(section));
4790         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4791                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4792
4793         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4794                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4795                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4796
4797         /* TSTORM */
4798         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4799                                             t_def_status_block);
4800         def_sb->t_def_status_block.status_block_id = sb_id;
4801
4802         REG_WR(bp, BAR_TSTRORM_INTMEM +
4803                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4804         REG_WR(bp, BAR_TSTRORM_INTMEM +
4805                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4806                U64_HI(section));
4807         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4808                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4809
4810         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4811                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4812                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4813
4814         /* XSTORM */
4815         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816                                             x_def_status_block);
4817         def_sb->x_def_status_block.status_block_id = sb_id;
4818
4819         REG_WR(bp, BAR_XSTRORM_INTMEM +
4820                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4821         REG_WR(bp, BAR_XSTRORM_INTMEM +
4822                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4823                U64_HI(section));
4824         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4825                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4826
4827         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4828                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4829                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4830
4831         bp->stats_pending = 0;
4832         bp->set_mac_pending = 0;
4833
4834         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4835 }
4836
4837 static void bnx2x_update_coalesce(struct bnx2x *bp)
4838 {
4839         int port = BP_PORT(bp);
4840         int i;
4841
4842         for_each_queue(bp, i) {
4843                 int sb_id = bp->fp[i].sb_id;
4844
4845                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4846                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4847                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4848                                                       U_SB_ETH_RX_CQ_INDEX),
4849                         bp->rx_ticks/12);
4850                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4851                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4852                                                        U_SB_ETH_RX_CQ_INDEX),
4853                          (bp->rx_ticks/12) ? 0 : 1);
4854
4855                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4856                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4857                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4858                                                       C_SB_ETH_TX_CQ_INDEX),
4859                         bp->tx_ticks/12);
4860                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4861                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4862                                                        C_SB_ETH_TX_CQ_INDEX),
4863                          (bp->tx_ticks/12) ? 0 : 1);
4864         }
4865 }
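/* The division by 12 rescales bp->rx_ticks/bp->tx_ticks (coalescing values
 * in microseconds, as exposed through ethtool) into the host-coalescing
 * block's timeout units; anything below 12 usec yields 0, in which case
 * the companion REG_WR16 sets the per-index disable flag (1) instead.
 */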
4866
4867 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4868                                        struct bnx2x_fastpath *fp, int last)
4869 {
4870         int i;
4871
4872         for (i = 0; i < last; i++) {
4873                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4874                 struct sk_buff *skb = rx_buf->skb;
4875
4876                 if (skb == NULL) {
4877                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4878                         continue;
4879                 }
4880
4881                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4882                         pci_unmap_single(bp->pdev,
4883                                          pci_unmap_addr(rx_buf, mapping),
4884                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4885
4886                 dev_kfree_skb(skb);
4887                 rx_buf->skb = NULL;
4888         }
4889 }
4890
4891 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4892 {
4893         int func = BP_FUNC(bp);
4894         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4895                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4896         u16 ring_prod, cqe_ring_prod;
4897         int i, j;
4898
4899         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4900         DP(NETIF_MSG_IFUP,
4901            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4902
4903         if (bp->flags & TPA_ENABLE_FLAG) {
4904
4905                 for_each_rx_queue(bp, j) {
4906                         struct bnx2x_fastpath *fp = &bp->fp[j];
4907
4908                         for (i = 0; i < max_agg_queues; i++) {
4909                                 fp->tpa_pool[i].skb =
4910                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4911                                 if (!fp->tpa_pool[i].skb) {
4912                                         BNX2X_ERR("Failed to allocate TPA "
4913                                                   "skb pool for queue[%d] - "
4914                                                   "disabling TPA on this "
4915                                                   "queue!\n", j);
4916                                         bnx2x_free_tpa_pool(bp, fp, i);
4917                                         fp->disable_tpa = 1;
4918                                         break;
4919                                 }
4920                                 /* per-queue pool entry, not bp->fp[0] */
4921                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4922                                                    mapping, 0);
4923                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4924                         }
4925                 }
4926         }
4927
4928         for_each_rx_queue(bp, j) {
4929                 struct bnx2x_fastpath *fp = &bp->fp[j];
4930
4931                 fp->rx_bd_cons = 0;
4932                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4933                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4934
4935                 /* Mark queue as Rx */
4936                 fp->is_rx_queue = 1;
4937
4938                 /* "next page" elements initialization */
4939                 /* SGE ring */
4940                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4941                         struct eth_rx_sge *sge;
4942
4943                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4944                         sge->addr_hi =
4945                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4946                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4947                         sge->addr_lo =
4948                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4949                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4950                 }
4951
4952                 bnx2x_init_sge_ring_bit_mask(fp);
4953
4954                 /* RX BD ring */
4955                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4956                         struct eth_rx_bd *rx_bd;
4957
4958                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4959                         rx_bd->addr_hi =
4960                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4961                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4962                         rx_bd->addr_lo =
4963                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4964                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4965                 }
4966
4967                 /* CQ ring */
4968                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4969                         struct eth_rx_cqe_next_page *nextpg;
4970
4971                         nextpg = (struct eth_rx_cqe_next_page *)
4972                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4973                         nextpg->addr_hi =
4974                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4975                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4976                         nextpg->addr_lo =
4977                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4978                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4979                 }
4980
4981                 /* Allocate SGEs and initialize the ring elements */
4982                 for (i = 0, ring_prod = 0;
4983                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4984
4985                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4986                                 BNX2X_ERR("was only able to allocate "
4987                                           "%d rx sges\n", i);
4988                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4989                                 /* Cleanup already allocated elements */
4990                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4991                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4992                                 fp->disable_tpa = 1;
4993                                 ring_prod = 0;
4994                                 break;
4995                         }
4996                         ring_prod = NEXT_SGE_IDX(ring_prod);
4997                 }
4998                 fp->rx_sge_prod = ring_prod;
4999
5000                 /* Allocate BDs and initialize BD ring */
5001                 fp->rx_comp_cons = 0;
5002                 cqe_ring_prod = ring_prod = 0;
5003                 for (i = 0; i < bp->rx_ring_size; i++) {
5004                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5005                                 BNX2X_ERR("was only able to allocate "
5006                                           "%d rx skbs on queue[%d]\n", i, j);
5007                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5008                                 break;
5009                         }
5010                         ring_prod = NEXT_RX_IDX(ring_prod);
5011                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5012                         WARN_ON(ring_prod <= i);
5013                 }
5014
5015                 fp->rx_bd_prod = ring_prod;
5016                 /* must not have more available CQEs than BDs */
5017                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5018                                        cqe_ring_prod);
5019                 fp->rx_pkt = fp->rx_calls = 0;
5020
5021                 /* Warning!
5022                  * This will generate an interrupt (to the TSTORM);
5023                  * it must only be done after the chip is initialized.
5024                  */
5025                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5026                                      fp->rx_sge_prod);
5027                 if (j != 0)
5028                         continue;
5029
5030                 REG_WR(bp, BAR_USTRORM_INTMEM +
5031                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5032                        U64_LO(fp->rx_comp_mapping));
5033                 REG_WR(bp, BAR_USTRORM_INTMEM +
5034                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5035                        U64_HI(fp->rx_comp_mapping));
5036         }
5037 }
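/* Each of the rings initialized above spans several BCM_PAGE_SIZE pages,
 * with the final slots of every page reserved as "next page" pointers that
 * chain the pages into one logical ring; the NEXT_RX_IDX()/NEXT_SGE_IDX()/
 * NEXT_RCQ_IDX() macros skip those reserved slots when a producer index
 * advances.  Sketch of the chaining for a two-page BD ring (illustrative):
 *
 *	page 0: bd[0] ... bd[n-3] | next-page pointer -> page 1
 *	page 1: bd[0] ... bd[n-3] | next-page pointer -> page 0
 */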
5038
5039 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5040 {
5041         int i, j;
5042
5043         for_each_tx_queue(bp, j) {
5044                 struct bnx2x_fastpath *fp = &bp->fp[j];
5045
5046                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5047                         struct eth_tx_next_bd *tx_next_bd =
5048                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5049
5050                         tx_next_bd->addr_hi =
5051                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5052                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5053                         tx_next_bd->addr_lo =
5054                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5055                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5056                 }
5057
5058                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5059                 fp->tx_db.data.zero_fill1 = 0;
5060                 fp->tx_db.data.prod = 0;
5061
5062                 fp->tx_pkt_prod = 0;
5063                 fp->tx_pkt_cons = 0;
5064                 fp->tx_bd_prod = 0;
5065                 fp->tx_bd_cons = 0;
5066                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5067                 fp->tx_pkt = 0;
5068         }
5069
5070         /* clean tx statistics */
5071         for_each_rx_queue(bp, i)
5072                 bnx2x_fp(bp, i, tx_pkt) = 0;
5073 }
5074
5075 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5076 {
5077         int func = BP_FUNC(bp);
5078
5079         spin_lock_init(&bp->spq_lock);
5080
5081         bp->spq_left = MAX_SPQ_PENDING;
5082         bp->spq_prod_idx = 0;
5083         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5084         bp->spq_prod_bd = bp->spq;
5085         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5086
5087         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5088                U64_LO(bp->spq_mapping));
5089         REG_WR(bp,
5090                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5091                U64_HI(bp->spq_mapping));
5092
5093         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5094                bp->spq_prod_idx);
5095 }
5096
5097 static void bnx2x_init_context(struct bnx2x *bp)
5098 {
5099         int i;
5100
5101         for_each_rx_queue(bp, i) {
5102                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5103                 struct bnx2x_fastpath *fp = &bp->fp[i];
5104                 u8 cl_id = fp->cl_id;
5105
5106                 context->ustorm_st_context.common.sb_index_numbers =
5107                                                 BNX2X_RX_SB_INDEX_NUM;
5108                 context->ustorm_st_context.common.clientId = cl_id;
5109                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5110                 context->ustorm_st_context.common.flags =
5111                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5112                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5113                 context->ustorm_st_context.common.statistics_counter_id =
5114                                                 cl_id;
5115                 context->ustorm_st_context.common.mc_alignment_log_size =
5116                                                 BNX2X_RX_ALIGN_SHIFT;
5117                 context->ustorm_st_context.common.bd_buff_size =
5118                                                 bp->rx_buf_size;
5119                 context->ustorm_st_context.common.bd_page_base_hi =
5120                                                 U64_HI(fp->rx_desc_mapping);
5121                 context->ustorm_st_context.common.bd_page_base_lo =
5122                                                 U64_LO(fp->rx_desc_mapping);
5123                 if (!fp->disable_tpa) {
5124                         context->ustorm_st_context.common.flags |=
5125                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5126                         context->ustorm_st_context.common.sge_buff_size =
5127                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5128                                          (u32)0xffff);
5129                         context->ustorm_st_context.common.sge_page_base_hi =
5130                                                 U64_HI(fp->rx_sge_mapping);
5131                         context->ustorm_st_context.common.sge_page_base_lo =
5132                                                 U64_LO(fp->rx_sge_mapping);
5133
5134                         context->ustorm_st_context.common.max_sges_for_packet =
5135                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5136                         context->ustorm_st_context.common.max_sges_for_packet =
5137                                 ((context->ustorm_st_context.common.
5138                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5139                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5140                 }
5141
5142                 context->ustorm_ag_context.cdu_usage =
5143                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5144                                                CDU_REGION_NUMBER_UCM_AG,
5145                                                ETH_CONNECTION_TYPE);
5146
5147                 context->xstorm_ag_context.cdu_reserved =
5148                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5149                                                CDU_REGION_NUMBER_XCM_AG,
5150                                                ETH_CONNECTION_TYPE);
5151         }
5152
5153         for_each_tx_queue(bp, i) {
5154                 struct bnx2x_fastpath *fp = &bp->fp[i];
5155                 struct eth_context *context =
5156                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5157
5158                 context->cstorm_st_context.sb_index_number =
5159                                                 C_SB_ETH_TX_CQ_INDEX;
5160                 context->cstorm_st_context.status_block_id = fp->sb_id;
5161
5162                 context->xstorm_st_context.tx_bd_page_base_hi =
5163                                                 U64_HI(fp->tx_desc_mapping);
5164                 context->xstorm_st_context.tx_bd_page_base_lo =
5165                                                 U64_LO(fp->tx_desc_mapping);
5166                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5167                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5168         }
5169 }
5170
5171 static void bnx2x_init_ind_table(struct bnx2x *bp)
5172 {
5173         int func = BP_FUNC(bp);
5174         int i;
5175
5176         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5177                 return;
5178
5179         DP(NETIF_MSG_IFUP,
5180            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5181         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5182                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5183                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5184                         bp->fp->cl_id + (i % bp->num_rx_queues));
5185 }
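/* Worked example of the round-robin fill above: with num_rx_queues == 4
 * and a leading cl_id of 0, the indirection table entries become
 * 0, 1, 2, 3, 0, 1, 2, 3, ... so RSS hash buckets are spread evenly across
 * the rx clients.  Queue counts and client ids are illustrative.
 */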
5186
5187 static void bnx2x_set_client_config(struct bnx2x *bp)
5188 {
5189         struct tstorm_eth_client_config tstorm_client = {0};
5190         int port = BP_PORT(bp);
5191         int i;
5192
5193         tstorm_client.mtu = bp->dev->mtu;
5194         tstorm_client.config_flags =
5195                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5196                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5197 #ifdef BCM_VLAN
5198         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5199                 tstorm_client.config_flags |=
5200                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5201                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5202         }
5203 #endif
5204
5205         for_each_queue(bp, i) {
5206                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5207
5208                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5209                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5210                        ((u32 *)&tstorm_client)[0]);
5211                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5212                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5213                        ((u32 *)&tstorm_client)[1]);
5214         }
5215
5216         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5217            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5218 }
5219
5220 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5221 {
5222         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5223         int mode = bp->rx_mode;
5224         int mask = (1 << BP_L_ID(bp));
5225         int func = BP_FUNC(bp);
5226         int port = BP_PORT(bp);
5227         int i;
5228         /* All but management unicast packets should pass to the host as well */
5229         u32 llh_mask =
5230                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5231                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5232                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5233                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5234
5235         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5236
5237         switch (mode) {
5238         case BNX2X_RX_MODE_NONE: /* no Rx */
5239                 tstorm_mac_filter.ucast_drop_all = mask;
5240                 tstorm_mac_filter.mcast_drop_all = mask;
5241                 tstorm_mac_filter.bcast_drop_all = mask;
5242                 break;
5243
5244         case BNX2X_RX_MODE_NORMAL:
5245                 tstorm_mac_filter.bcast_accept_all = mask;
5246                 break;
5247
5248         case BNX2X_RX_MODE_ALLMULTI:
5249                 tstorm_mac_filter.mcast_accept_all = mask;
5250                 tstorm_mac_filter.bcast_accept_all = mask;
5251                 break;
5252
5253         case BNX2X_RX_MODE_PROMISC:
5254                 tstorm_mac_filter.ucast_accept_all = mask;
5255                 tstorm_mac_filter.mcast_accept_all = mask;
5256                 tstorm_mac_filter.bcast_accept_all = mask;
5257                 /* pass management unicast packets as well */
5258                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5259                 break;
5260
5261         default:
5262                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5263                 break;
5264         }
5265
5266         REG_WR(bp,
5267                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5268                llh_mask);
5269
5270         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5271                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5272                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5273                        ((u32 *)&tstorm_mac_filter)[i]);
5274
5275 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5276                    ((u32 *)&tstorm_mac_filter)[i]); */
5277         }
5278
5279         if (mode != BNX2X_RX_MODE_NONE)
5280                 bnx2x_set_client_config(bp);
5281 }
5282
5283 static void bnx2x_init_internal_common(struct bnx2x *bp)
5284 {
5285         int i;
5286
5287         /* Zero this manually as its initialization is
5288            currently missing in the initTool */
5289         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5290                 REG_WR(bp, BAR_USTRORM_INTMEM +
5291                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5292 }
5293
5294 static void bnx2x_init_internal_port(struct bnx2x *bp)
5295 {
5296         int port = BP_PORT(bp);
5297
5298         REG_WR(bp,
5299                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5300         REG_WR(bp,
5301                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5302         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5303         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5304 }
5305
5306 static void bnx2x_init_internal_func(struct bnx2x *bp)
5307 {
5308         struct tstorm_eth_function_common_config tstorm_config = {0};
5309         struct stats_indication_flags stats_flags = {0};
5310         int port = BP_PORT(bp);
5311         int func = BP_FUNC(bp);
5312         int i, j;
5313         u32 offset;
5314         u16 max_agg_size;
5315
5316         if (is_multi(bp)) {
5317                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5318                 tstorm_config.rss_result_mask = MULTI_MASK;
5319         }
5320
5321         /* Enable TPA if needed */
5322         if (bp->flags & TPA_ENABLE_FLAG)
5323                 tstorm_config.config_flags |=
5324                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5325
5326         if (IS_E1HMF(bp))
5327                 tstorm_config.config_flags |=
5328                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5329
5330         tstorm_config.leading_client_id = BP_L_ID(bp);
5331
5332         REG_WR(bp, BAR_TSTRORM_INTMEM +
5333                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5334                (*(u32 *)&tstorm_config));
5335
5336         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5337         bnx2x_set_storm_rx_mode(bp);
5338
5339         for_each_queue(bp, i) {
5340                 u8 cl_id = bp->fp[i].cl_id;
5341
5342                 /* reset xstorm per client statistics */
5343                 offset = BAR_XSTRORM_INTMEM +
5344                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5345                 for (j = 0;
5346                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5347                         REG_WR(bp, offset + j*4, 0);
5348
5349                 /* reset tstorm per client statistics */
5350                 offset = BAR_TSTRORM_INTMEM +
5351                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5352                 for (j = 0;
5353                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5354                         REG_WR(bp, offset + j*4, 0);
5355
5356                 /* reset ustorm per client statistics */
5357                 offset = BAR_USTRORM_INTMEM +
5358                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5359                 for (j = 0;
5360                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5361                         REG_WR(bp, offset + j*4, 0);
5362         }
5363
5364         /* Init statistics related context */
5365         stats_flags.collect_eth = 1;
5366
5367         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5368                ((u32 *)&stats_flags)[0]);
5369         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5370                ((u32 *)&stats_flags)[1]);
5371
5372         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5373                ((u32 *)&stats_flags)[0]);
5374         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5375                ((u32 *)&stats_flags)[1]);
5376
5377         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5378                ((u32 *)&stats_flags)[0]);
5379         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5380                ((u32 *)&stats_flags)[1]);
5381
5382         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5383                ((u32 *)&stats_flags)[0]);
5384         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5385                ((u32 *)&stats_flags)[1]);
5386
5387         REG_WR(bp, BAR_XSTRORM_INTMEM +
5388                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5389                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5390         REG_WR(bp, BAR_XSTRORM_INTMEM +
5391                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5392                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5393
5394         REG_WR(bp, BAR_TSTRORM_INTMEM +
5395                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5396                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5397         REG_WR(bp, BAR_TSTRORM_INTMEM +
5398                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5399                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5400
5401         REG_WR(bp, BAR_USTRORM_INTMEM +
5402                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5403                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5404         REG_WR(bp, BAR_USTRORM_INTMEM +
5405                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5406                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5407
5408         if (CHIP_IS_E1H(bp)) {
5409                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5410                         IS_E1HMF(bp));
5411                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5412                         IS_E1HMF(bp));
5413                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5414                         IS_E1HMF(bp));
5415                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5416                         IS_E1HMF(bp));
5417
5418                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5419                          bp->e1hov);
5420         }
5421
5422         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5423         max_agg_size =
5424                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5425                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5426                     (u32)0xffff);
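        /* Worked example of the clamp above, assuming SGE_PAGE_SIZE == 4096
         * and PAGES_PER_SGE == 2 (both vary by configuration):
         * min(8, MAX_SKB_FRAGS) is 8, so 8 * 4096 * 2 = 65536, which the
         * outer min() clamps to the u16 limit 0xffff.
         */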
5427         for_each_rx_queue(bp, i) {
5428                 struct bnx2x_fastpath *fp = &bp->fp[i];
5429
5430                 REG_WR(bp, BAR_USTRORM_INTMEM +
5431                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5432                        U64_LO(fp->rx_comp_mapping));
5433                 REG_WR(bp, BAR_USTRORM_INTMEM +
5434                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5435                        U64_HI(fp->rx_comp_mapping));
5436
5437                 /* Next page */
5438                 REG_WR(bp, BAR_USTRORM_INTMEM +
5439                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5440                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5441                 REG_WR(bp, BAR_USTRORM_INTMEM +
5442                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5443                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5444
5445                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5446                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5447                          max_agg_size);
5448         }
5449
5450         /* dropless flow control */
5451         if (CHIP_IS_E1H(bp)) {
5452                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5453
5454                 rx_pause.bd_thr_low = 250;
5455                 rx_pause.cqe_thr_low = 250;
5456                 rx_pause.cos = 1;
5457                 rx_pause.sge_thr_low = 0;
5458                 rx_pause.bd_thr_high = 350;
5459                 rx_pause.cqe_thr_high = 350;
5460                 rx_pause.sge_thr_high = 0;
5461
5462                 for_each_rx_queue(bp, i) {
5463                         struct bnx2x_fastpath *fp = &bp->fp[i];
5464
5465                         if (!fp->disable_tpa) {
5466                                 rx_pause.sge_thr_low = 150;
5467                                 rx_pause.sge_thr_high = 250;
5468                         }
5469
5470
5471                         offset = BAR_USTRORM_INTMEM +
5472                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5473                                                                    fp->cl_id);
5474                         for (j = 0;
5475                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5476                              j++)
5477                                 REG_WR(bp, offset + j*4,
5478                                        ((u32 *)&rx_pause)[j]);
5479                 }
5480         }
5481
5482         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5483
5484         /* Init rate shaping and fairness contexts */
5485         if (IS_E1HMF(bp)) {
5486                 int vn;
5487
5488                 /* During init there is no active link;
5489                    until the link is up, set the link rate to 10Gbps */
5490                 bp->link_vars.line_speed = SPEED_10000;
5491                 bnx2x_init_port_minmax(bp);
5492
5493                 bnx2x_calc_vn_weight_sum(bp);
5494
5495                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5496                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5497
5498                 /* Enable rate shaping and fairness */
5499                 bp->cmng.flags.cmng_enables =
5500                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5501                 if (bp->vn_weight_sum)
5502                         bp->cmng.flags.cmng_enables |=
5503                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5504                 else
5505                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5506                            " fairness will be disabled\n");
5507         } else {
5508                 /* rate shaping and fairness are disabled */
5509                 DP(NETIF_MSG_IFUP,
5510                    "single function mode; minmax will be disabled\n");
5511         }
5512
5513
5514         /* Store it to internal memory */
5515         if (bp->port.pmf)
5516                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5517                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5518                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5519                                ((u32 *)(&bp->cmng))[i]);
5520 }
5521
5522 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5523 {
5524         switch (load_code) {
5525         case FW_MSG_CODE_DRV_LOAD_COMMON:
5526                 bnx2x_init_internal_common(bp);
5527                 /* no break */
5528
5529         case FW_MSG_CODE_DRV_LOAD_PORT:
5530                 bnx2x_init_internal_port(bp);
5531                 /* no break */
5532
5533         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5534                 bnx2x_init_internal_func(bp);
5535                 break;
5536
5537         default:
5538                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5539                 break;
5540         }
5541 }
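/* The switch above relies on deliberate fallthrough, so each load type runs
 * its own init stage plus every narrower one:
 *
 *      FW_MSG_CODE_DRV_LOAD_COMMON   -> common + port + function
 *      FW_MSG_CODE_DRV_LOAD_PORT     ->          port + function
 *      FW_MSG_CODE_DRV_LOAD_FUNCTION ->                 function
 */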
5542
5543 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5544 {
5545         int i;
5546
5547         for_each_queue(bp, i) {
5548                 struct bnx2x_fastpath *fp = &bp->fp[i];
5549
5550                 fp->bp = bp;
5551                 fp->state = BNX2X_FP_STATE_CLOSED;
5552                 fp->index = i;
5553                 fp->cl_id = BP_L_ID(bp) + i;
5554                 fp->sb_id = fp->cl_id;
5555                 /* Corresponding Rx and Tx SBs are served by the same client */
5556                 if (i >= bp->num_rx_queues)
5557                         fp->cl_id -= bp->num_rx_queues;
5558                 DP(NETIF_MSG_IFUP,
5559                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5560                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5561                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5562                               fp->sb_id);
5563                 bnx2x_update_fpsb_idx(fp);
5564         }
5565
5566         /* ensure status block indices were read */
5567         rmb();
5568
5569
5570         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5571                           DEF_SB_ID);
5572         bnx2x_update_dsb_idx(bp);
5573         bnx2x_update_coalesce(bp);
5574         bnx2x_init_rx_rings(bp);
5575         bnx2x_init_tx_ring(bp);
5576         bnx2x_init_sp_ring(bp);
5577         bnx2x_init_context(bp);
5578         bnx2x_init_internal(bp, load_code);
5579         bnx2x_init_ind_table(bp);
5580         bnx2x_stats_init(bp);
5581
5582         /* At this point, we are ready for interrupts */
5583         atomic_set(&bp->intr_sem, 0);
5584
5585         /* flush all before enabling interrupts */
5586         mb();
5587         mmiowb();
5588
5589         bnx2x_int_enable(bp);
5590
5591         /* Check for SPIO5 */
5592         bnx2x_attn_int_deasserted0(bp,
5593                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5594                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5595 }
5596
5597 /* end of nic init */
5598
5599 /*
5600  * gzip service functions
5601  */
5602
5603 static int bnx2x_gunzip_init(struct bnx2x *bp)
5604 {
5605         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5606                                               &bp->gunzip_mapping);
5607         if (bp->gunzip_buf == NULL)
5608                 goto gunzip_nomem1;
5609
5610         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5611         if (bp->strm == NULL)
5612                 goto gunzip_nomem2;
5613
5614         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5615                                       GFP_KERNEL);
5616         if (bp->strm->workspace == NULL)
5617                 goto gunzip_nomem3;
5618
5619         return 0;
5620
5621 gunzip_nomem3:
5622         kfree(bp->strm);
5623         bp->strm = NULL;
5624
5625 gunzip_nomem2:
5626         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5627                             bp->gunzip_mapping);
5628         bp->gunzip_buf = NULL;
5629
5630 gunzip_nomem1:
5631         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5632                " decompression\n", bp->dev->name);
5633         return -ENOMEM;
5634 }
5635
5636 static void bnx2x_gunzip_end(struct bnx2x *bp)
5637 {
5638         kfree(bp->strm->workspace);
5639
5640         kfree(bp->strm);
5641         bp->strm = NULL;
5642
5643         if (bp->gunzip_buf) {
5644                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5645                                     bp->gunzip_mapping);
5646                 bp->gunzip_buf = NULL;
5647         }
5648 }
5649
5650 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5651 {
5652         int n, rc;
5653
5654         /* check gzip header */
5655         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5656                 BNX2X_ERR("Bad gzip header\n");
5657                 return -EINVAL;
5658         }
5659
5660         n = 10;
5661
5662 #define FNAME                           0x8
5663
5664         if (zbuf[3] & FNAME)
5665                 while ((zbuf[n++] != 0) && (n < len));
5666
5667         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5668         bp->strm->avail_in = len - n;
5669         bp->strm->next_out = bp->gunzip_buf;
5670         bp->strm->avail_out = FW_BUF_SIZE;
5671
5672         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5673         if (rc != Z_OK)
5674                 return rc;
5675
5676         rc = zlib_inflate(bp->strm, Z_FINISH);
5677         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5678                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5679                        bp->dev->name, bp->strm->msg);
5680
5681         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5682         if (bp->gunzip_outlen & 0x3)
5683                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5684                                     " gunzip_outlen (%d) not aligned\n",
5685                        bp->dev->name, bp->gunzip_outlen);
5686         bp->gunzip_outlen >>= 2;
5687
5688         zlib_inflateEnd(bp->strm);
5689
5690         if (rc == Z_STREAM_END)
5691                 return 0;
5692
5693         return rc;
5694 }
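/* Sketch of the gzip framing that bnx2x_gunzip() relies on: a 10-byte fixed
 * header (magic 0x1f 0x8b, method Z_DEFLATED, flags in byte 3), an optional
 * NUL-terminated file name when FNAME (0x08) is set, then the raw deflate
 * stream that -MAX_WBITS tells zlib_inflateInit2() to expect.  Hypothetical
 * helper (not driver code) computing the deflate-data offset; like the code
 * above, it ignores the FEXTRA/FCOMMENT/FHCRC optional fields:
 */
static int sketch_gzip_data_offset(const u8 *zbuf, int len)
{
        int n = 10;                     /* fixed gzip header */

        if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
            zbuf[2] != Z_DEFLATED)
                return -EINVAL;
        if (zbuf[3] & 0x08)             /* FNAME: skip NUL-terminated name */
                while (n < len && zbuf[n++] != 0)
                        ;
        return n;
}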
5695
5696 /* nic load/unload */
5697
5698 /*
5699  * General service functions
5700  */
5701
5702 /* send a NIG loopback debug packet */
5703 static void bnx2x_lb_pckt(struct bnx2x *bp)
5704 {
5705         u32 wb_write[3];
5706
5707         /* Ethernet source and destination addresses */
5708         wb_write[0] = 0x55555555;
5709         wb_write[1] = 0x55555555;
5710         wb_write[2] = 0x20;             /* SOP */
5711         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5712
5713         /* NON-IP protocol */
5714         wb_write[0] = 0x09000000;
5715         wb_write[1] = 0x55555555;
5716         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5717         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5718 }
5719
5720 /* Some of the internal memories
5721  * are not directly readable from the driver;
5722  * to test them we send debug packets.
5723  */
5724 static int bnx2x_int_mem_test(struct bnx2x *bp)
5725 {
5726         int factor;
5727         int count, i;
5728         u32 val = 0;
5729
5730         if (CHIP_REV_IS_FPGA(bp))
5731                 factor = 120;
5732         else if (CHIP_REV_IS_EMUL(bp))
5733                 factor = 200;
5734         else
5735                 factor = 1;
5736
5737         DP(NETIF_MSG_HW, "start part1\n");
5738
5739         /* Disable inputs of parser neighbor blocks */
5740         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5741         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5742         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5743         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5744
5745         /*  Write 0 to parser credits for CFC search request */
5746         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5747
5748         /* send Ethernet packet */
5749         bnx2x_lb_pckt(bp);
5750
5751         /* TODO: do I reset the NIG statistics? */
5752         /* Wait until NIG register shows 1 packet of size 0x10 */
5753         count = 1000 * factor;
5754         while (count) {
5755
5756                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5757                 val = *bnx2x_sp(bp, wb_data[0]);
5758                 if (val == 0x10)
5759                         break;
5760
5761                 msleep(10);
5762                 count--;
5763         }
5764         if (val != 0x10) {
5765                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5766                 return -1;
5767         }
5768
5769         /* Wait until PRS register shows 1 packet */
5770         count = 1000 * factor;
5771         while (count) {
5772                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5773                 if (val == 1)
5774                         break;
5775
5776                 msleep(10);
5777                 count--;
5778         }
5779         if (val != 0x1) {
5780                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5781                 return -2;
5782         }
5783
5784         /* Reset and init BRB, PRS */
5785         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5786         msleep(50);
5787         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5788         msleep(50);
5789         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5790         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5791
5792         DP(NETIF_MSG_HW, "part2\n");
5793
5794         /* Disable inputs of parser neighbor blocks */
5795         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5798         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5799
5800         /* Write 0 to parser credits for CFC search request */
5801         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5802
5803         /* send 10 Ethernet packets */
5804         for (i = 0; i < 10; i++)
5805                 bnx2x_lb_pckt(bp);
5806
5807         /* Wait until NIG register shows 10 + 1
5808            packets of size 11*0x10 = 0xb0 */
5809         count = 1000 * factor;
5810         while (count) {
5811
5812                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5813                 val = *bnx2x_sp(bp, wb_data[0]);
5814                 if (val == 0xb0)
5815                         break;
5816
5817                 msleep(10);
5818                 count--;
5819         }
5820         if (val != 0xb0) {
5821                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5822                 return -3;
5823         }
5824
5825         /* Wait until PRS register shows 2 packets */
5826         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827         if (val != 2)
5828                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5829
5830         /* Write 1 to parser credits for CFC search request */
5831         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5832
5833         /* Wait until PRS register shows 3 packets */
5834         msleep(10 * factor);
5835         /* the PRS register should now show 3 packets */
5836         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5837         if (val != 3)
5838                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5839
5840         /* clear NIG EOP FIFO */
5841         for (i = 0; i < 11; i++)
5842                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5843         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5844         if (val != 1) {
5845                 BNX2X_ERR("clear of NIG failed\n");
5846                 return -4;
5847         }
5848
5849         /* Reset and init BRB, PRS, NIG */
5850         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5851         msleep(50);
5852         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5853         msleep(50);
5854         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5855         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5856 #ifndef BCM_ISCSI
5857         /* set NIC mode */
5858         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5859 #endif
5860
5861         /* Enable inputs of parser neighbor blocks */
5862         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5863         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5864         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5865         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5866
5867         DP(NETIF_MSG_HW, "done\n");
5868
5869         return 0; /* OK */
5870 }
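/* Both wait loops above are instances of the poll-with-timeout idiom (the
 * driver's reg_poll(), used later in bnx2x_init_common(), has the same
 * shape).  A generic sketch, with a hypothetical helper name:
 */
static u32 sketch_poll_reg(struct bnx2x *bp, u32 reg, u32 expected,
                           int tries, int interval_ms)
{
        u32 val = ~expected;

        while (tries--) {
                val = REG_RD(bp, reg);
                if (val == expected)
                        break;
                msleep(interval_ms);
        }
        return val;             /* caller compares against expected */
}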
5871
5872 static void enable_blocks_attention(struct bnx2x *bp)
5873 {
5874         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5875         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5876         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5877         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5878         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5879         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5880         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5881         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5882         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5883 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5884 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5885         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5886         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5887         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5888 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5889 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5890         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5891         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5892         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5893         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5894 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5895 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5896         if (CHIP_REV_IS_FPGA(bp))
5897                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5898         else
5899                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5900         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5901         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5902         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5903 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5904 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5905         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5906         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5907 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5908         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5909 }
5910
5911
5912 static void bnx2x_reset_common(struct bnx2x *bp)
5913 {
5914         /* reset_common */
5915         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5916                0xd3ffff7f);
5917         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5918 }
5919
5920
5921 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5922 {
5923         u32 val;
5924         u8 port;
5925         u8 is_required = 0;
5926
5927         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5928               SHARED_HW_CFG_FAN_FAILURE_MASK;
5929
5930         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5931                 is_required = 1;
5932
5933         /*
5934          * The fan failure mechanism is usually related to the PHY type since
5935          * the power consumption of the board is affected by the PHY. Currently,
5936          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5937          */
5938         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5939                 for (port = PORT_0; port < PORT_MAX; port++) {
5940                         u32 phy_type =
5941                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5942                                          external_phy_config) &
5943                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5944                         is_required |=
5945                                 ((phy_type ==
5946                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5947                                  (phy_type ==
5948                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5949                                  (phy_type ==
5950                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5951                 }
5952
5953         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5954
5955         if (is_required == 0)
5956                 return;
5957
5958         /* Fan failure is indicated by SPIO 5 */
5959         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5960                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5961
5962         /* set to active low mode */
5963         val = REG_RD(bp, MISC_REG_SPIO_INT);
5964         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5965                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5966         REG_WR(bp, MISC_REG_SPIO_INT, val);
5967
5968         /* enable interrupt to signal the IGU */
5969         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5970         val |= (1 << MISC_REGISTERS_SPIO_5);
5971         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5972 }
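/* The two SPIO writes above use the usual read-modify-write pattern so that
 * unrelated bits in MISC_REG_SPIO_INT / MISC_REG_SPIO_EVENT_EN keep their
 * values.  Generic sketch (hypothetical helper, not driver code):
 */
static void sketch_reg_set_bits(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val |= bits;            /* set only the requested bits */
        REG_WR(bp, reg, val);
}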
5973
5974 static int bnx2x_init_common(struct bnx2x *bp)
5975 {
5976         u32 val, i;
5977
5978         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5979
5980         bnx2x_reset_common(bp);
5981         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5982         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5983
5984         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5985         if (CHIP_IS_E1H(bp))
5986                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5987
5988         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5989         msleep(30);
5990         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5991
5992         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5993         if (CHIP_IS_E1(bp)) {
5994                 /* enable HW interrupt from PXP on USDM overflow
5995                    bit 16 on INT_MASK_0 */
5996                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5997         }
5998
5999         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6000         bnx2x_init_pxp(bp);
6001
6002 #ifdef __BIG_ENDIAN
6003         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6004         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6005         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6006         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6007         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6008         /* make sure this value is 0 */
6009         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6010
6011 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6012         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6013         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6014         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6015         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6016 #endif
6017
6018         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6019 #ifdef BCM_ISCSI
6020         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6021         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6022         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6023 #endif
6024
6025         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6026                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6027
6028         /* let the HW do its magic ... */
6029         msleep(100);
6030         /* finish PXP init */
6031         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6032         if (val != 1) {
6033                 BNX2X_ERR("PXP2 CFG failed\n");
6034                 return -EBUSY;
6035         }
6036         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6037         if (val != 1) {
6038                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6039                 return -EBUSY;
6040         }
6041
6042         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6043         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6044
6045         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6046
6047         /* clean the DMAE memory */
6048         bp->dmae_ready = 1;
6049         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6050
6051         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6052         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6053         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6054         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6055
6056         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6057         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6058         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6059         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6060
6061         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6062         /* soft reset pulse */
6063         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6064         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6065
6066 #ifdef BCM_ISCSI
6067         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6068 #endif
6069
6070         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6071         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6072         if (!CHIP_REV_IS_SLOW(bp)) {
6073                 /* enable hw interrupt from doorbell Q */
6074                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6075         }
6076
6077         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6078         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6079         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6080         /* set NIC mode */
6081         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6082         if (CHIP_IS_E1H(bp))
6083                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6084
6085         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6086         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6087         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6088         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6089
6090         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6091         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6092         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6093         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6094
6095         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6096         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6097         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6098         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6099
6100         /* sync semi rtc */
6101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6102                0x80000000);
6103         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6104                0x80000000);
6105
6106         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6107         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6108         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6109
6110         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6111         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6112                 REG_WR(bp, i, 0xc0cac01a);
6113                 /* TODO: replace with something meaningful */
6114         }
6115         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6116         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6117
6118         if (sizeof(union cdu_context) != 1024)
6119                 /* we currently assume that a context is 1024 bytes */
6120                 printk(KERN_ALERT PFX "please adjust the size of"
6121                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6122
6123         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6124         val = (4 << 24) + (0 << 12) + 1024;
6125         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6126
6127         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6128         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6129         /* enable context validation interrupt from CFC */
6130         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6131
6132         /* set the thresholds to prevent CFC/CDU race */
6133         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6134
6135         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6136         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6137
6138         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6139         /* Reset PCIE errors for debug */
6140         REG_WR(bp, 0x2814, 0xffffffff);
6141         REG_WR(bp, 0x3820, 0xffffffff);
6142
6143         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6144         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6145         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6146         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6147
6148         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6149         if (CHIP_IS_E1H(bp)) {
6150                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6151                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6152         }
6153
6154         if (CHIP_REV_IS_SLOW(bp))
6155                 msleep(200);
6156
6157         /* finish CFC init */
6158         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6159         if (val != 1) {
6160                 BNX2X_ERR("CFC LL_INIT failed\n");
6161                 return -EBUSY;
6162         }
6163         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6164         if (val != 1) {
6165                 BNX2X_ERR("CFC AC_INIT failed\n");
6166                 return -EBUSY;
6167         }
6168         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6169         if (val != 1) {
6170                 BNX2X_ERR("CFC CAM_INIT failed\n");
6171                 return -EBUSY;
6172         }
6173         REG_WR(bp, CFC_REG_DEBUG0, 0);
6174
6175         /* read the NIG statistic
6176            to see if this is the first bring-up since power-up */
6177         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6178         val = *bnx2x_sp(bp, wb_data[0]);
6179
6180         /* do internal memory self test */
6181         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6182                 BNX2X_ERR("internal mem self test failed\n");
6183                 return -EBUSY;
6184         }
6185
6186         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6187         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6188         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6189         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6190         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6191                 bp->port.need_hw_lock = 1;
6192                 break;
6193
6194         default:
6195                 break;
6196         }
6197
6198         bnx2x_setup_fan_failure_detection(bp);
6199
6200         /* clear PXP2 attentions */
6201         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6202
6203         enable_blocks_attention(bp);
6204
6205         if (!BP_NOMCP(bp)) {
6206                 bnx2x_acquire_phy_lock(bp);
6207                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6208                 bnx2x_release_phy_lock(bp);
6209         } else
6210                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6211
6212         return 0;
6213 }
6214
6215 static int bnx2x_init_port(struct bnx2x *bp)
6216 {
6217         int port = BP_PORT(bp);
6218         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6219         u32 low, high;
6220         u32 val;
6221
6222         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6223
6224         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6225
6226         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6227         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6228
6229         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6230         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6231         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6232 #ifdef BCM_ISCSI
6233         /* Port0  1
6234          * Port1  385 */
6235         i++;
6236         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6237         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6238         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6239         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6240
6241         /* Port0  2
6242          * Port1  386 */
6243         i++;
6244         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6245         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6246         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6247         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6248
6249         /* Port0  3
6250          * Port1  387 */
6251         i++;
6252         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6253         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6254         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6255         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6256 #endif
6257         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6258
6259 #ifdef BCM_ISCSI
6260         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6261         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6262
6263         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6264 #endif
6265         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6266
6267         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6268         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6269                 /* no pause for emulation and FPGA */
6270                 low = 0;
6271                 high = 513;
6272         } else {
6273                 if (IS_E1HMF(bp))
6274                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6275                 else if (bp->dev->mtu > 4096) {
6276                         if (bp->flags & ONE_PORT_FLAG)
6277                                 low = 160;
6278                         else {
6279                                 val = bp->dev->mtu;
6280                                 /* (24*1024 + val*4)/256 */
6281                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6282                         }
6283                 } else
6284                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6285                 high = low + 56;        /* 14*1024/256 */
6286         }
6287         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6288         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
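        /* Worked example of the non-MF threshold math above, in 256-byte BRB
         * blocks: for mtu == 9000 on a two-port board,
         * low = 96 + 9000/64 + 1 = 237 (i.e. (24*1024 + 9000*4)/256 rounded
         * up) and high = 237 + 56 = 293, the extra 56 blocks being 14KB.
         */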
6289
6290
6291         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6292
6293         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6294         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6295         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6296         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6297
6298         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6299         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6300         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6301         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6302
6303         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6304         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6305
6306         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6307
6308         /* configure PBF to work without PAUSE mtu 9000 */
6309         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6310
6311         /* update threshold */
6312         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6313         /* update init credit */
6314         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6315
6316         /* probe changes */
6317         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6318         msleep(5);
6319         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6320
6321 #ifdef BCM_ISCSI
6322         /* tell the searcher where the T2 table is */
6323         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6324
6325         wb_write[0] = U64_LO(bp->t2_mapping);
6326         wb_write[1] = U64_HI(bp->t2_mapping);
6327         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6328         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6329         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6330         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6331
6332         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6333 #endif
6334         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6335         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6336
6337         if (CHIP_IS_E1(bp)) {
6338                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6339                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6340         }
6341         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6342
6343         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6344         /* init aeu_mask_attn_func_0/1:
6345          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6346          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6347          *             bits 4-7 are used for "per vn group attention" */
6348         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6349                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6350
6351         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6352         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6353         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6354         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6355         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6356
6357         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6358
6359         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6360
6361         if (CHIP_IS_E1H(bp)) {
6362                 /* 0x2 disable e1hov, 0x1 enable */
6363                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6364                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6365
6366                 {
6367                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6368                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6369                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6370                 }
6371         }
6372
6373         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6374         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6375
6376         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6377         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6378                 {
6379                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6380
6381                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6382                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6383
6384                 /* The GPIO should be swapped if the swap register is
6385                    set and active */
6386                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6387                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6388
6389                 /* Select function upon port-swap configuration */
6390                 if (port == 0) {
6391                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6392                         aeu_gpio_mask = (swap_val && swap_override) ?
6393                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6394                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6395                 } else {
6396                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6397                         aeu_gpio_mask = (swap_val && swap_override) ?
6398                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6399                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6400                 }
6401                 val = REG_RD(bp, offset);
6402                 /* add GPIO3 to group */
6403                 val |= aeu_gpio_mask;
6404                 REG_WR(bp, offset, val);
6405                 }
6406                 break;
6407
6408         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6409         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6410                 /* add SPIO 5 to group 0 */
6411                 {
6412                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6413                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6414                 val = REG_RD(bp, reg_addr);
6415                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6416                 REG_WR(bp, reg_addr, val);
6417                 }
6418                 break;
6419
6420         default:
6421                 break;
6422         }
6423
6424         bnx2x__link_reset(bp);
6425
6426         return 0;
6427 }
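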
6428
6429 #define ILT_PER_FUNC            (768/2)
6430 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6431 /* the phys address is shifted right 12 bits and a 1=valid bit is
6432    added at the 53rd bit;
6433    then, since this is a wide register(TM),
6434    we split it into two 32-bit writes
6435  */
6436 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6437 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6438 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6439 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
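/* Worked example of the address packing above: for addr == 0x1234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (addr >> 12) and ONCHIP_ADDR2() yields
 * 0x00100000 (the 1 << 20 valid bit; the high address bits are zero here).
 * PXP_ONE_ILT(5) packs first == last == 5 as (5 << 10) | 5 == 0x1405.
 */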
6440
6441 #define CNIC_ILT_LINES          0
6442
6443 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6444 {
6445         int reg;
6446
6447         if (CHIP_IS_E1H(bp))
6448                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6449         else /* E1 */
6450                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6451
6452         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6453 }
6454
6455 static int bnx2x_init_func(struct bnx2x *bp)
6456 {
6457         int port = BP_PORT(bp);
6458         int func = BP_FUNC(bp);
6459         u32 addr, val;
6460         int i;
6461
6462         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6463
6464         /* set MSI reconfigure capability */
6465         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6466         val = REG_RD(bp, addr);
6467         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6468         REG_WR(bp, addr, val);
6469
6470         i = FUNC_ILT_BASE(func);
6471
6472         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6473         if (CHIP_IS_E1H(bp)) {
6474                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6475                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6476         } else /* E1 */
6477                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6478                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6479
6480
6481         if (CHIP_IS_E1H(bp)) {
6482                 for (i = 0; i < 9; i++)
6483                         bnx2x_init_block(bp,
6484                                          cm_blocks[i], FUNC0_STAGE + func);
6485
6486                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6487                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6488         }
6489
6490         /* HC init per function */
6491         if (CHIP_IS_E1H(bp)) {
6492                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6493
6494                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6495                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6496         }
6497         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6498
6499         /* Reset PCIE errors for debug */
6500         REG_WR(bp, 0x2114, 0xffffffff);
6501         REG_WR(bp, 0x2120, 0xffffffff);
6502
6503         return 0;
6504 }
6505
6506 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6507 {
6508         int i, rc = 0;
6509
6510         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6511            BP_FUNC(bp), load_code);
6512
6513         bp->dmae_ready = 0;
6514         mutex_init(&bp->dmae_mutex);
6515         rc = bnx2x_gunzip_init(bp);
6516         if (rc)
6517                 return rc;
6518
6519         switch (load_code) {
6520         case FW_MSG_CODE_DRV_LOAD_COMMON:
6521                 rc = bnx2x_init_common(bp);
6522                 if (rc)
6523                         goto init_hw_err;
6524                 /* no break */
6525
6526         case FW_MSG_CODE_DRV_LOAD_PORT:
6527                 bp->dmae_ready = 1;
6528                 rc = bnx2x_init_port(bp);
6529                 if (rc)
6530                         goto init_hw_err;
6531                 /* no break */
6532
6533         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6534                 bp->dmae_ready = 1;
6535                 rc = bnx2x_init_func(bp);
6536                 if (rc)
6537                         goto init_hw_err;
6538                 break;
6539
6540         default:
6541                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6542                 break;
6543         }
6544
6545         if (!BP_NOMCP(bp)) {
6546                 int func = BP_FUNC(bp);
6547
6548                 bp->fw_drv_pulse_wr_seq =
6549                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6550                                  DRV_PULSE_SEQ_MASK);
6551                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6552         }
6553
6554         /* this needs to be done before gunzip end */
6555         bnx2x_zero_def_sb(bp);
6556         for_each_queue(bp, i)
6557                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6558
6559 init_hw_err:
6560         bnx2x_gunzip_end(bp);
6561
6562         return rc;
6563 }
6564
6565 static void bnx2x_free_mem(struct bnx2x *bp)
6566 {
6567
6568 #define BNX2X_PCI_FREE(x, y, size) \
6569         do { \
6570                 if (x) { \
6571                         pci_free_consistent(bp->pdev, size, x, y); \
6572                         x = NULL; \
6573                         y = 0; \
6574                 } \
6575         } while (0)
6576
6577 #define BNX2X_FREE(x) \
6578         do { \
6579                 if (x) { \
6580                         vfree(x); \
6581                         x = NULL; \
6582                 } \
6583         } while (0)
6584
6585         int i;
6586
6587         /* fastpath */
6588         /* Common */
6589         for_each_queue(bp, i) {
6590
6591                 /* status blocks */
6592                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6593                                bnx2x_fp(bp, i, status_blk_mapping),
6594                                sizeof(struct host_status_block));
6595         }
6596         /* Rx */
6597         for_each_rx_queue(bp, i) {
6598
6599                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6600                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6601                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6602                                bnx2x_fp(bp, i, rx_desc_mapping),
6603                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6604
6605                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6606                                bnx2x_fp(bp, i, rx_comp_mapping),
6607                                sizeof(struct eth_fast_path_rx_cqe) *
6608                                NUM_RCQ_BD);
6609
6610                 /* SGE ring */
6611                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6612                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6613                                bnx2x_fp(bp, i, rx_sge_mapping),
6614                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6615         }
6616         /* Tx */
6617         for_each_tx_queue(bp, i) {
6618
6619                 /* fastpath tx rings: tx_buf tx_desc */
6620                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6621                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6622                                bnx2x_fp(bp, i, tx_desc_mapping),
6623                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6624         }
6625         /* end of fastpath */
6626
6627         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6628                        sizeof(struct host_def_status_block));
6629
6630         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6631                        sizeof(struct bnx2x_slowpath));
6632
6633 #ifdef BCM_ISCSI
6634         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6635         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6636         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6637         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6638 #endif
6639         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6640
6641 #undef BNX2X_PCI_FREE
6642 #undef BNX2X_FREE
6643 }
6644
6645 static int bnx2x_alloc_mem(struct bnx2x *bp)
6646 {
6647
6648 #define BNX2X_PCI_ALLOC(x, y, size) \
6649         do { \
6650                 x = pci_alloc_consistent(bp->pdev, size, y); \
6651                 if (x == NULL) \
6652                         goto alloc_mem_err; \
6653                 memset(x, 0, size); \
6654         } while (0)
6655
6656 #define BNX2X_ALLOC(x, size) \
6657         do { \
6658                 x = vmalloc(size); \
6659                 if (x == NULL) \
6660                         goto alloc_mem_err; \
6661                 memset(x, 0, size); \
6662         } while (0)
6663
6664         int i;
6665
6666         /* fastpath */
6667         /* Common */
6668         for_each_queue(bp, i) {
6669                 bnx2x_fp(bp, i, bp) = bp;
6670
6671                 /* status blocks */
6672                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6673                                 &bnx2x_fp(bp, i, status_blk_mapping),
6674                                 sizeof(struct host_status_block));
6675         }
6676         /* Rx */
6677         for_each_rx_queue(bp, i) {
6678
6679                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6680                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6681                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6682                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6683                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6684                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6685
6686                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6687                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6688                                 sizeof(struct eth_fast_path_rx_cqe) *
6689                                 NUM_RCQ_BD);
6690
6691                 /* SGE ring */
6692                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6693                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6694                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6695                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6696                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6697         }
6698         /* Tx */
6699         for_each_tx_queue(bp, i) {
6700
6701                 /* fastpath tx rings: tx_buf tx_desc */
6702                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6703                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6704                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6705                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6706                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6707         }
6708         /* end of fastpath */
6709
6710         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6711                         sizeof(struct host_def_status_block));
6712
6713         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6714                         sizeof(struct bnx2x_slowpath));
6715
6716 #ifdef BCM_ISCSI
6717         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6718
6719         /* Initialize T1 */
6720         for (i = 0; i < 64*1024; i += 64) {
6721                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6722                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6723         }
6724
6725         /* allocate searcher T2 table
6726            we allocate 1/4 of alloc num for T2
6727            (which is not entered into the ILT) */
6728         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6729
6730         /* Initialize T2 */
6731         for (i = 0; i < 16*1024; i += 64)
6732                 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6733
6734         /* now fixup the last line in the block to point to the next block */
6735         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
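        /* Resulting layout (worked example): T2 is a chain of 64-byte lines
         * whose last 8 bytes hold the bus address of the next line; with
         * 16K of T2 there are 256 lines:
         *
         *      line   0 @ t2_mapping +     0: next = t2_mapping +    64
         *      line   1 @ t2_mapping +    64: next = t2_mapping +   128
         *      ...
         *      line 255 @ t2_mapping + 16320: next = t2_mapping (wrap)
         */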
6736
6737         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6738         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6739
6740         /* QM queues (128*MAX_CONN) */
6741         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6742 #endif
6743
6744         /* Slow path ring */
6745         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6746
6747         return 0;
6748
6749 alloc_mem_err:
6750         bnx2x_free_mem(bp);
6751         return -ENOMEM;
6752
6753 #undef BNX2X_PCI_ALLOC
6754 #undef BNX2X_ALLOC
6755 }
6756
6757 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6758 {
6759         int i;
6760
6761         for_each_tx_queue(bp, i) {
6762                 struct bnx2x_fastpath *fp = &bp->fp[i];
6763
6764                 u16 bd_cons = fp->tx_bd_cons;
6765                 u16 sw_prod = fp->tx_pkt_prod;
6766                 u16 sw_cons = fp->tx_pkt_cons;
6767
6768                 while (sw_cons != sw_prod) {
6769                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6770                         sw_cons++;
6771                 }
6772         }
6773 }
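/* Note on the loop above: sw_cons and sw_prod are u16 ring indices, so the
 * comparison and increment wrap naturally.  Worked example: with
 * sw_prod = 0x0005 and sw_cons = 0xfffe the loop iterates 7 times, freeing
 * packets 0xfffe, 0xffff, 0x0000, ..., 0x0004, each masked to a BD index
 * by TX_BD().
 */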
6774
6775 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6776 {
6777         int i, j;
6778
6779         for_each_rx_queue(bp, j) {
6780                 struct bnx2x_fastpath *fp = &bp->fp[j];
6781
6782                 for (i = 0; i < NUM_RX_BD; i++) {
6783                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6784                         struct sk_buff *skb = rx_buf->skb;
6785
6786                         if (skb == NULL)
6787                                 continue;
6788
6789                         pci_unmap_single(bp->pdev,
6790                                          pci_unmap_addr(rx_buf, mapping),
6791                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6792
6793                         rx_buf->skb = NULL;
6794                         dev_kfree_skb(skb);
6795                 }
6796                 if (!fp->disable_tpa)
6797                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6798                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6799                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6800         }
6801 }
6802
6803 static void bnx2x_free_skbs(struct bnx2x *bp)
6804 {
6805         bnx2x_free_tx_skbs(bp);
6806         bnx2x_free_rx_skbs(bp);
6807 }
6808
6809 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6810 {
6811         int i, offset = 1;
6812
6813         free_irq(bp->msix_table[0].vector, bp->dev);
6814         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6815            bp->msix_table[0].vector);
6816
6817         for_each_queue(bp, i) {
6818                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6819                    "state %x\n", i, bp->msix_table[i + offset].vector,
6820                    bnx2x_fp(bp, i, state));
6821
6822                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6823         }
6824 }
6825
6826 static void bnx2x_free_irq(struct bnx2x *bp)
6827 {
6828         if (bp->flags & USING_MSIX_FLAG) {
6829                 bnx2x_free_msix_irqs(bp);
6830                 pci_disable_msix(bp->pdev);
6831                 bp->flags &= ~USING_MSIX_FLAG;
6832
6833         } else if (bp->flags & USING_MSI_FLAG) {
6834                 free_irq(bp->pdev->irq, bp->dev);
6835                 pci_disable_msi(bp->pdev);
6836                 bp->flags &= ~USING_MSI_FLAG;
6837
6838         } else
6839                 free_irq(bp->pdev->irq, bp->dev);
6840 }
6841
6842 static int bnx2x_enable_msix(struct bnx2x *bp)
6843 {
6844         int i, rc, offset = 1;
6845         int igu_vec = 0;
6846
6847         bp->msix_table[0].entry = igu_vec;
6848         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6849
6850         for_each_queue(bp, i) {
6851                 igu_vec = BP_L_ID(bp) + offset + i;
6852                 bp->msix_table[i + offset].entry = igu_vec;
6853                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6854                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6855         }
6856
6857         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6858                              BNX2X_NUM_QUEUES(bp) + offset);
6859         if (rc) {
6860                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6861                 return rc;
6862         }
6863
6864         bp->flags |= USING_MSIX_FLAG;
6865
6866         return 0;
6867 }
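/* MSI-X vector layout produced above (sketch): entry 0 carries the slowpath
 * (default status block) interrupt and entries 1..BNX2X_NUM_QUEUES(bp) the
 * fastpath queues.  E.g. with 4 queues and BP_L_ID(bp) == 0:
 *
 *      msix_table[0].entry = 0         (slowpath)
 *      msix_table[1].entry = 1         (fastpath #0)
 *      ...
 *      msix_table[4].entry = 4         (fastpath #3)
 */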
6868
6869 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6870 {
6871         int i, rc, offset = 1;
6872
6873         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6874                          bp->dev->name, bp->dev);
6875         if (rc) {
6876                 BNX2X_ERR("request sp irq failed\n");
6877                 return -EBUSY;
6878         }
6879
6880         for_each_queue(bp, i) {
6881                 struct bnx2x_fastpath *fp = &bp->fp[i];
6882
6883                 if (i < bp->num_rx_queues)
6884                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6885                 else
6886                         sprintf(fp->name, "%s-tx-%d",
6887                                 bp->dev->name, i - bp->num_rx_queues);
6888
6889                 rc = request_irq(bp->msix_table[i + offset].vector,
6890                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6891                 if (rc) {
6892                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6893                         bnx2x_free_msix_irqs(bp);
6894                         return -EBUSY;
6895                 }
6896
6897                 fp->state = BNX2X_FP_STATE_IRQ;
6898         }
6899
6900         i = BNX2X_NUM_QUEUES(bp);
6901         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
6902                " ... fp[%d] %d\n",
6903                bp->dev->name, bp->msix_table[0].vector,
6904                0, bp->msix_table[offset].vector,
6905                i - 1, bp->msix_table[offset + i - 1].vector);
6906
6907         return 0;
6908 }
6909
6910 static int bnx2x_enable_msi(struct bnx2x *bp)
6911 {
6912         int rc;
6913
6914         rc = pci_enable_msi(bp->pdev);
6915         if (rc) {
6916                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6917                 return -1;
6918         }
6919         bp->flags |= USING_MSI_FLAG;
6920
6921         return 0;
6922 }
6923
6924 static int bnx2x_req_irq(struct bnx2x *bp)
6925 {
6926         unsigned long flags;
6927         int rc;
6928
6929         if (bp->flags & USING_MSI_FLAG)
6930                 flags = 0;
6931         else
6932                 flags = IRQF_SHARED;
6933
6934         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6935                          bp->dev->name, bp->dev);
6936         if (!rc)
6937                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6938
6939         return rc;
6940 }
6941
6942 static void bnx2x_napi_enable(struct bnx2x *bp)
6943 {
6944         int i;
6945
6946         for_each_rx_queue(bp, i)
6947                 napi_enable(&bnx2x_fp(bp, i, napi));
6948 }
6949
6950 static void bnx2x_napi_disable(struct bnx2x *bp)
6951 {
6952         int i;
6953
6954         for_each_rx_queue(bp, i)
6955                 napi_disable(&bnx2x_fp(bp, i, napi));
6956 }
6957
6958 static void bnx2x_netif_start(struct bnx2x *bp)
6959 {
6960         int intr_sem;
6961
6962         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6963         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6964
6965         if (intr_sem) {
6966                 if (netif_running(bp->dev)) {
6967                         bnx2x_napi_enable(bp);
6968                         bnx2x_int_enable(bp);
6969                         if (bp->state == BNX2X_STATE_OPEN)
6970                                 netif_tx_wake_all_queues(bp->dev);
6971                 }
6972         }
6973 }
6974
6975 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6976 {
6977         bnx2x_int_disable_sync(bp, disable_hw);
6978         bnx2x_napi_disable(bp);
6979         netif_tx_disable(bp->dev);
6980         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6981 }
6982
6983 /*
6984  * Init service functions
6985  */
6986
6987 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6988 {
6989         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6990         int port = BP_PORT(bp);
6991
6992         /* CAM allocation
6993          * unicasts 0-31:port0 32-63:port1
6994          * multicast 64-127:port0 128-191:port1
6995          */
6996         config->hdr.length = 2;
6997         config->hdr.offset = port ? 32 : 0;
6998         config->hdr.client_id = bp->fp->cl_id;
6999         config->hdr.reserved1 = 0;
7000
7001         /* primary MAC */
7002         config->config_table[0].cam_entry.msb_mac_addr =
7003                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7004         config->config_table[0].cam_entry.middle_mac_addr =
7005                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7006         config->config_table[0].cam_entry.lsb_mac_addr =
7007                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
7008         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7009         if (set)
7010                 config->config_table[0].target_table_entry.flags = 0;
7011         else
7012                 CAM_INVALIDATE(config->config_table[0]);
7013         config->config_table[0].target_table_entry.clients_bit_vector =
7014                                                 cpu_to_le32(1 << BP_L_ID(bp));
7015         config->config_table[0].target_table_entry.vlan_id = 0;
7016
7017         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7018            (set ? "setting" : "clearing"),
7019            config->config_table[0].cam_entry.msb_mac_addr,
7020            config->config_table[0].cam_entry.middle_mac_addr,
7021            config->config_table[0].cam_entry.lsb_mac_addr);
7022
7023         /* broadcast */
7024         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7025         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7026         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7027         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7028         if (set)
7029                 config->config_table[1].target_table_entry.flags =
7030                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7031         else
7032                 CAM_INVALIDATE(config->config_table[1]);
7033         config->config_table[1].target_table_entry.clients_bit_vector =
7034                                                 cpu_to_le32(1 << BP_L_ID(bp));
7035         config->config_table[1].target_table_entry.vlan_id = 0;
7036
7037         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7038                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7039                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7040 }
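/* Worked example of the CAM packing above (assuming a little-endian host,
 * where *(u16 *)&dev_addr[n] reads bytes n and n+1 LSB-first and swab16()
 * restores wire order): for MAC 00:10:18:ab:cd:ef the entry becomes
 *
 *      msb_mac_addr    = 0x0010
 *      middle_mac_addr = 0x18ab
 *      lsb_mac_addr    = 0xcdef
 *
 * which is exactly what the "%04x:%04x:%04x" debug print shows.
 */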
7041
7042 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7043 {
7044         struct mac_configuration_cmd_e1h *config =
7045                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7046
7047         /* CAM allocation for E1H
7048          * unicasts: by func number
7049          * multicast: 20+FUNC*20, 20 each
7050          */
7051         config->hdr.length = 1;
7052         config->hdr.offset = BP_FUNC(bp);
7053         config->hdr.client_id = bp->fp->cl_id;
7054         config->hdr.reserved1 = 0;
7055
7056         /* primary MAC */
7057         config->config_table[0].msb_mac_addr =
7058                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7059         config->config_table[0].middle_mac_addr =
7060                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7061         config->config_table[0].lsb_mac_addr =
7062                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
7063         config->config_table[0].clients_bit_vector =
7064                                         cpu_to_le32(1 << BP_L_ID(bp));
7065         config->config_table[0].vlan_id = 0;
7066         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7067         if (set)
7068                 config->config_table[0].flags = BP_PORT(bp);
7069         else
7070                 config->config_table[0].flags =
7071                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7072
7073         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
7074            (set ? "setting" : "clearing"),
7075            config->config_table[0].msb_mac_addr,
7076            config->config_table[0].middle_mac_addr,
7077            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7078
7079         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7080                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7081                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7082 }
7083
7084 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7085                              int *state_p, int poll)
7086 {
7087         /* can take a while if any port is running */
7088         int cnt = 5000;
7089
7090         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7091            poll ? "polling" : "waiting", state, idx);
7092
7093         might_sleep();
7094         while (cnt--) {
7095                 if (poll) {
7096                         bnx2x_rx_int(bp->fp, 10);
7097                         /* if the index is different from 0,
7098                          * the reply for some commands will
7099                          * be on the non-default queue
7100                          */
7101                         if (idx)
7102                                 bnx2x_rx_int(&bp->fp[idx], 10);
7103                 }
7104
7105                 mb(); /* state is changed by bnx2x_sp_event() */
7106                 if (*state_p == state) {
7107 #ifdef BNX2X_STOP_ON_ERROR
7108                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7109 #endif
7110                         return 0;
7111                 }
7112
7113                 msleep(1);
7114
7115                 if (bp->panic)
7116                         return -EIO;
7117         }
7118
7119         /* timeout! */
7120         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7121                   poll ? "polling" : "waiting", state, idx);
7122 #ifdef BNX2X_STOP_ON_ERROR
7123         bnx2x_panic();
7124 #endif
7125
7126         return -EBUSY;
7127 }
7128
7129 static int bnx2x_setup_leading(struct bnx2x *bp)
7130 {
7131         int rc;
7132
7133         /* reset IGU state */
7134         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7135
7136         /* SETUP ramrod */
7137         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7138
7139         /* Wait for completion */
7140         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7141
7142         return rc;
7143 }
7144
7145 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7146 {
7147         struct bnx2x_fastpath *fp = &bp->fp[index];
7148
7149         /* reset IGU state */
7150         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7151
7152         /* SETUP ramrod */
7153         fp->state = BNX2X_FP_STATE_OPENING;
7154         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7155                       fp->cl_id, 0);
7156
7157         /* Wait for completion */
7158         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7159                                  &(fp->state), 0);
7160 }
7161
7162 static int bnx2x_poll(struct napi_struct *napi, int budget);
7163
7164 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7165                                     int *num_tx_queues_out)
7166 {
7167         int _num_rx_queues = 0, _num_tx_queues = 0;
7168
7169         switch (bp->multi_mode) {
7170         case ETH_RSS_MODE_DISABLED:
7171                 _num_rx_queues = 1;
7172                 _num_tx_queues = 1;
7173                 break;
7174
7175         case ETH_RSS_MODE_REGULAR:
7176                 if (num_rx_queues)
7177                         _num_rx_queues = min_t(u32, num_rx_queues,
7178                                                BNX2X_MAX_QUEUES(bp));
7179                 else
7180                         _num_rx_queues = min_t(u32, num_online_cpus(),
7181                                                BNX2X_MAX_QUEUES(bp));
7182
7183                 if (num_tx_queues)
7184                         _num_tx_queues = min_t(u32, num_tx_queues,
7185                                                BNX2X_MAX_QUEUES(bp));
7186                 else
7187                         _num_tx_queues = min_t(u32, num_online_cpus(),
7188                                                BNX2X_MAX_QUEUES(bp));
7189
7190                 /* There must be no more Tx queues than Rx queues */
7191                 if (_num_tx_queues > _num_rx_queues) {
7192                         BNX2X_ERR("number of tx queues (%d) > "
7193                                   "number of rx queues (%d)"
7194                                   "  defaulting to %d\n",
7195                                   _num_tx_queues, _num_rx_queues,
7196                                   _num_rx_queues);
7197                         _num_tx_queues = _num_rx_queues;
7198                 }
7199                 break;
7200
7201
7202         default:
7203                 _num_rx_queues = 1;
7204                 _num_tx_queues = 1;
7205                 break;
7206         }
7207
7208         *num_rx_queues_out = _num_rx_queues;
7209         *num_tx_queues_out = _num_tx_queues;
7210 }
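/* Worked example for ETH_RSS_MODE_REGULAR: on an 8-CPU system with both
 * module parameters left at 0 and BNX2X_MAX_QUEUES(bp) >= 8, the function
 * returns rx = tx = 8.  With num_rx_queues=4 and num_tx_queues=12 it first
 * clamps each against BNX2X_MAX_QUEUES() and then forces tx down to 4, so
 * that there are never more Tx than Rx queues.
 */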
7211
7212 static int bnx2x_set_int_mode(struct bnx2x *bp)
7213 {
7214         int rc = 0;
7215
7216         switch (int_mode) {
7217         case INT_MODE_INTx:
7218         case INT_MODE_MSI:
7219                 bp->num_rx_queues = 1;
7220                 bp->num_tx_queues = 1;
7221                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7222                 break;
7223
7224         case INT_MODE_MSIX:
7225         default:
7226                 /* Set interrupt mode according to bp->multi_mode value */
7227                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7228                                         &bp->num_tx_queues);
7229
7230                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7231                    bp->num_rx_queues, bp->num_tx_queues);
7232
7233                 /* if we can't use MSI-X we only need one fp,
7234                  * so try to enable MSI-X with the requested number of fp's
7235                  * and fall back to MSI or legacy INTx with one fp
7236                  */
7237                 rc = bnx2x_enable_msix(bp);
7238                 if (rc) {
7239                         /* failed to enable MSI-X */
7240                         if (bp->multi_mode)
7241                                 BNX2X_ERR("Multi requested but failed to "
7242                                           "enable MSI-X (rx %d tx %d), "
7243                                           "set number of queues to 1\n",
7244                                           bp->num_rx_queues, bp->num_tx_queues);
7245                         bp->num_rx_queues = 1;
7246                         bp->num_tx_queues = 1;
7247                 }
7248                 break;
7249         }
7250         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7251         return rc;
7252 }
7253
7254
7255 /* must be called with rtnl_lock */
7256 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7257 {
7258         u32 load_code;
7259         int i, rc;
7260
7261 #ifdef BNX2X_STOP_ON_ERROR
7262         if (unlikely(bp->panic))
7263                 return -EPERM;
7264 #endif
7265
7266         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7267
7268         rc = bnx2x_set_int_mode(bp);
7269
7270         if (bnx2x_alloc_mem(bp))
7271                 return -ENOMEM;
7272
7273         for_each_rx_queue(bp, i)
7274                 bnx2x_fp(bp, i, disable_tpa) =
7275                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7276
7277         for_each_rx_queue(bp, i)
7278                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7279                                bnx2x_poll, 128);
7280
7281         bnx2x_napi_enable(bp);
7282
7283         if (bp->flags & USING_MSIX_FLAG) {
7284                 rc = bnx2x_req_msix_irqs(bp);
7285                 if (rc) {
7286                         pci_disable_msix(bp->pdev);
7287                         goto load_error1;
7288                 }
7289         } else {
7290                 /* Fall back to INTx if we failed to enable MSI-X due to
7291                    lack of memory (in bnx2x_set_int_mode()) */
7292                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7293                         bnx2x_enable_msi(bp);
7294                 bnx2x_ack_int(bp);
7295                 rc = bnx2x_req_irq(bp);
7296                 if (rc) {
7297                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7298                         if (bp->flags & USING_MSI_FLAG)
7299                                 pci_disable_msi(bp->pdev);
7300                         goto load_error1;
7301                 }
7302                 if (bp->flags & USING_MSI_FLAG) {
7303                         bp->dev->irq = bp->pdev->irq;
7304                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7305                                bp->dev->name, bp->pdev->irq);
7306                 }
7307         }
7308
7309         /* Send LOAD_REQUEST command to MCP.
7310            Returns the type of LOAD command:
7311            if it is the first port to be initialized,
7312            common blocks should be initialized, otherwise not
7313         */
7314         if (!BP_NOMCP(bp)) {
7315                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7316                 if (!load_code) {
7317                         BNX2X_ERR("MCP response failure, aborting\n");
7318                         rc = -EBUSY;
7319                         goto load_error2;
7320                 }
7321                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7322                         rc = -EBUSY; /* other port in diagnostic mode */
7323                         goto load_error2;
7324                 }
7325
7326         } else {
7327                 int port = BP_PORT(bp);
7328
7329                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7330                    load_count[0], load_count[1], load_count[2]);
7331                 load_count[0]++;
7332                 load_count[1 + port]++;
7333                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7334                    load_count[0], load_count[1], load_count[2]);
7335                 if (load_count[0] == 1)
7336                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7337                 else if (load_count[1 + port] == 1)
7338                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7339                 else
7340                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7341         }
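        /* Bookkeeping sketch for the NO-MCP path above: load_count[0]
         * counts functions loaded on the whole chip, load_count[1 + port]
         * counts functions loaded per port.  On a two-port board the first
         * function up gets LOAD_COMMON, the first function on the other
         * port gets LOAD_PORT, and every additional function gets
         * LOAD_FUNCTION.
         */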
7342
7343         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7344             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7345                 bp->port.pmf = 1;
7346         else
7347                 bp->port.pmf = 0;
7348         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7349
7350         /* Initialize HW */
7351         rc = bnx2x_init_hw(bp, load_code);
7352         if (rc) {
7353                 BNX2X_ERR("HW init failed, aborting\n");
7354                 goto load_error2;
7355         }
7356
7357         /* Setup NIC internals and enable interrupts */
7358         bnx2x_nic_init(bp, load_code);
7359
7360         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7361             (bp->common.shmem2_base))
7362                 SHMEM2_WR(bp, dcc_support,
7363                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7364                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7365
7366         /* Send LOAD_DONE command to MCP */
7367         if (!BP_NOMCP(bp)) {
7368                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7369                 if (!load_code) {
7370                         BNX2X_ERR("MCP response failure, aborting\n");
7371                         rc = -EBUSY;
7372                         goto load_error3;
7373                 }
7374         }
7375
7376         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7377
7378         rc = bnx2x_setup_leading(bp);
7379         if (rc) {
7380                 BNX2X_ERR("Setup leading failed!\n");
7381 #ifndef BNX2X_STOP_ON_ERROR
7382                 goto load_error3;
7383 #else
7384                 bp->panic = 1;
7385                 return -EBUSY;
7386 #endif
7387         }
7388
7389         if (CHIP_IS_E1H(bp))
7390                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7391                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7392                         bp->state = BNX2X_STATE_DISABLED;
7393                 }
7394
7395         if (bp->state == BNX2X_STATE_OPEN) {
7396                 for_each_nondefault_queue(bp, i) {
7397                         rc = bnx2x_setup_multi(bp, i);
7398                         if (rc)
7399                                 goto load_error3;
7400                 }
7401
7402                 if (CHIP_IS_E1(bp))
7403                         bnx2x_set_mac_addr_e1(bp, 1);
7404                 else
7405                         bnx2x_set_mac_addr_e1h(bp, 1);
7406         }
7407
7408         if (bp->port.pmf)
7409                 bnx2x_initial_phy_init(bp, load_mode);
7410
7411         /* Start fast path */
7412         switch (load_mode) {
7413         case LOAD_NORMAL:
7414                 if (bp->state == BNX2X_STATE_OPEN) {
7415                         /* Tx queues should only be re-enabled */
7416                         netif_tx_wake_all_queues(bp->dev);
7417                 }
7418                 /* Initialize the receive filter. */
7419                 bnx2x_set_rx_mode(bp->dev);
7420                 break;
7421
7422         case LOAD_OPEN:
7423                 netif_tx_start_all_queues(bp->dev);
7424                 if (bp->state != BNX2X_STATE_OPEN)
7425                         netif_tx_disable(bp->dev);
7426                 /* Initialize the receive filter. */
7427                 bnx2x_set_rx_mode(bp->dev);
7428                 break;
7429
7430         case LOAD_DIAG:
7431                 /* Initialize the receive filter. */
7432                 bnx2x_set_rx_mode(bp->dev);
7433                 bp->state = BNX2X_STATE_DIAG;
7434                 break;
7435
7436         default:
7437                 break;
7438         }
7439
7440         if (!bp->port.pmf)
7441                 bnx2x__link_status_update(bp);
7442
7443         /* start the timer */
7444         mod_timer(&bp->timer, jiffies + bp->current_interval);
7445
7446
7447         return 0;
7448
7449 load_error3:
7450         bnx2x_int_disable_sync(bp, 1);
7451         if (!BP_NOMCP(bp)) {
7452                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7453                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7454         }
7455         bp->port.pmf = 0;
7456         /* Free SKBs, SGEs, TPA pool and driver internals */
7457         bnx2x_free_skbs(bp);
7458         for_each_rx_queue(bp, i)
7459                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7460 load_error2:
7461         /* Release IRQs */
7462         bnx2x_free_irq(bp);
7463 load_error1:
7464         bnx2x_napi_disable(bp);
7465         for_each_rx_queue(bp, i)
7466                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7467         bnx2x_free_mem(bp);
7468
7469         return rc;
7470 }
7471
7472 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7473 {
7474         struct bnx2x_fastpath *fp = &bp->fp[index];
7475         int rc;
7476
7477         /* halt the connection */
7478         fp->state = BNX2X_FP_STATE_HALTING;
7479         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7480
7481         /* Wait for completion */
7482         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7483                                &(fp->state), 1);
7484         if (rc) /* timeout */
7485                 return rc;
7486
7487         /* delete cfc entry */
7488         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7489
7490         /* Wait for completion */
7491         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7492                                &(fp->state), 1);
7493         return rc;
7494 }
7495
7496 static int bnx2x_stop_leading(struct bnx2x *bp)
7497 {
7498         __le16 dsb_sp_prod_idx;
7499         /* if the other port is handling traffic,
7500            this can take a lot of time */
7501         int cnt = 500;
7502         int rc;
7503
7504         might_sleep();
7505
7506         /* Send HALT ramrod */
7507         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7508         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7509
7510         /* Wait for completion */
7511         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7512                                &(bp->fp[0].state), 1);
7513         if (rc) /* timeout */
7514                 return rc;
7515
7516         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7517
7518         /* Send PORT_DELETE ramrod */
7519         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7520
7521         /* Wait for the completion to arrive on the default status block;
7522            we are going to reset the chip anyway,
7523            so there is not much to do if this times out
7524          */
7525         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7526                 if (!cnt) {
7527                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7528                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7529                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7530 #ifdef BNX2X_STOP_ON_ERROR
7531                         bnx2x_panic();
7532 #endif
7533                         rc = -EBUSY;
7534                         break;
7535                 }
7536                 cnt--;
7537                 msleep(1);
7538                 rmb(); /* Refresh the dsb_sp_prod */
7539         }
7540         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7541         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7542
7543         return rc;
7544 }
7545
7546 static void bnx2x_reset_func(struct bnx2x *bp)
7547 {
7548         int port = BP_PORT(bp);
7549         int func = BP_FUNC(bp);
7550         int base, i;
7551
7552         /* Configure IGU */
7553         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7554         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7555
7556         /* Clear ILT */
7557         base = FUNC_ILT_BASE(func);
7558         for (i = base; i < base + ILT_PER_FUNC; i++)
7559                 bnx2x_ilt_wr(bp, i, 0);
7560 }
7561
7562 static void bnx2x_reset_port(struct bnx2x *bp)
7563 {
7564         int port = BP_PORT(bp);
7565         u32 val;
7566
7567         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7568
7569         /* Do not rcv packets to BRB */
7570         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7571         /* Do not direct rcv packets that are not for MCP to the BRB */
7572         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7573                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7574
7575         /* Configure AEU */
7576         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7577
7578         msleep(100);
7579         /* Check for BRB port occupancy */
7580         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7581         if (val)
7582                 DP(NETIF_MSG_IFDOWN,
7583                    "BRB1 is not empty  %d blocks are occupied\n", val);
7584
7585         /* TODO: Close Doorbell port? */
7586 }
7587
7588 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7589 {
7590         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7591            BP_FUNC(bp), reset_code);
7592
7593         switch (reset_code) {
7594         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7595                 bnx2x_reset_port(bp);
7596                 bnx2x_reset_func(bp);
7597                 bnx2x_reset_common(bp);
7598                 break;
7599
7600         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7601                 bnx2x_reset_port(bp);
7602                 bnx2x_reset_func(bp);
7603                 break;
7604
7605         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7606                 bnx2x_reset_func(bp);
7607                 break;
7608
7609         default:
7610                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7611                 break;
7612         }
7613 }
7614
7615 /* must be called with rtnl_lock */
7616 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7617 {
7618         int port = BP_PORT(bp);
7619         u32 reset_code = 0;
7620         int i, cnt, rc;
7621
7622         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7623
7624         bp->rx_mode = BNX2X_RX_MODE_NONE;
7625         bnx2x_set_storm_rx_mode(bp);
7626
7627         bnx2x_netif_stop(bp, 1);
7628
7629         del_timer_sync(&bp->timer);
7630         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7631                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7632         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7633
7634         /* Release IRQs */
7635         bnx2x_free_irq(bp);
7636
7637         /* Wait until tx fastpath tasks complete */
7638         for_each_tx_queue(bp, i) {
7639                 struct bnx2x_fastpath *fp = &bp->fp[i];
7640
7641                 cnt = 1000;
7642                 while (bnx2x_has_tx_work_unload(fp)) {
7643
7644                         bnx2x_tx_int(fp);
7645                         if (!cnt) {
7646                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7647                                           i);
7648 #ifdef BNX2X_STOP_ON_ERROR
7649                                 bnx2x_panic();
7650                                 return -EBUSY;
7651 #else
7652                                 break;
7653 #endif
7654                         }
7655                         cnt--;
7656                         msleep(1);
7657                 }
7658         }
7659         /* Give HW time to discard old tx messages */
7660         msleep(1);
7661
7662         if (CHIP_IS_E1(bp)) {
7663                 struct mac_configuration_cmd *config =
7664                                                 bnx2x_sp(bp, mcast_config);
7665
7666                 bnx2x_set_mac_addr_e1(bp, 0);
7667
7668                 for (i = 0; i < config->hdr.length; i++)
7669                         CAM_INVALIDATE(config->config_table[i]);
7670
7671                 config->hdr.length = i;
7672                 if (CHIP_REV_IS_SLOW(bp))
7673                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7674                 else
7675                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7676                 config->hdr.client_id = bp->fp->cl_id;
7677                 config->hdr.reserved1 = 0;
7678
7679                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7680                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7681                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7682
7683         } else { /* E1H */
7684                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7685
7686                 bnx2x_set_mac_addr_e1h(bp, 0);
7687
7688                 for (i = 0; i < MC_HASH_SIZE; i++)
7689                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7690
7691                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7692         }
7693
7694         if (unload_mode == UNLOAD_NORMAL)
7695                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7696
7697         else if (bp->flags & NO_WOL_FLAG)
7698                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7699
7700         else if (bp->wol) {
7701                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7702                 u8 *mac_addr = bp->dev->dev_addr;
7703                 u32 val;
7704                 /* The mac address is written to entries 1-4 to
7705                    preserve entry 0, which is used by the PMF */
7706                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7707
7708                 val = (mac_addr[0] << 8) | mac_addr[1];
7709                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7710
7711                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7712                       (mac_addr[4] << 8) | mac_addr[5];
7713                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
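                /* Packing example for the two writes above: for MAC
                 * 00:10:18:ab:cd:ef the MAC_MATCH words become
                 *      entry     : (0x00 << 8) | 0x10 = 0x00000010
                 *      entry + 4 : (0x18 << 24) | (0xab << 16) |
                 *                  (0xcd << 8) | 0xef = 0x18abcdef
                 */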
7714
7715                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7716
7717         } else
7718                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7719
7720         /* Close multi and leading connections;
7721            completions for ramrods are collected synchronously */
7722         for_each_nondefault_queue(bp, i)
7723                 if (bnx2x_stop_multi(bp, i))
7724                         goto unload_error;
7725
7726         rc = bnx2x_stop_leading(bp);
7727         if (rc) {
7728                 BNX2X_ERR("Stop leading failed!\n");
7729 #ifdef BNX2X_STOP_ON_ERROR
7730                 return -EBUSY;
7731 #else
7732                 goto unload_error;
7733 #endif
7734         }
7735
7736 unload_error:
7737         if (!BP_NOMCP(bp))
7738                 reset_code = bnx2x_fw_command(bp, reset_code);
7739         else {
7740                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7741                    load_count[0], load_count[1], load_count[2]);
7742                 load_count[0]--;
7743                 load_count[1 + port]--;
7744                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7745                    load_count[0], load_count[1], load_count[2]);
7746                 if (load_count[0] == 0)
7747                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7748                 else if (load_count[1 + port] == 0)
7749                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7750                 else
7751                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7752         }
7753
7754         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7755             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7756                 bnx2x__link_reset(bp);
7757
7758         /* Reset the chip */
7759         bnx2x_reset_chip(bp, reset_code);
7760
7761         /* Report UNLOAD_DONE to MCP */
7762         if (!BP_NOMCP(bp))
7763                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7764
7765         bp->port.pmf = 0;
7766
7767         /* Free SKBs, SGEs, TPA pool and driver internals */
7768         bnx2x_free_skbs(bp);
7769         for_each_rx_queue(bp, i)
7770                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7771         for_each_rx_queue(bp, i)
7772                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7773         bnx2x_free_mem(bp);
7774
7775         bp->state = BNX2X_STATE_CLOSED;
7776
7777         netif_carrier_off(bp->dev);
7778
7779         return 0;
7780 }
7781
7782 static void bnx2x_reset_task(struct work_struct *work)
7783 {
7784         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7785
7786 #ifdef BNX2X_STOP_ON_ERROR
7787         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
7788                   " so reset not done to allow debug dump;\n"
7789                   " you will need to reboot when done\n");
7790         return;
7791 #endif
7792
7793         rtnl_lock();
7794
7795         if (!netif_running(bp->dev))
7796                 goto reset_task_exit;
7797
7798         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7799         bnx2x_nic_load(bp, LOAD_NORMAL);
7800
7801 reset_task_exit:
7802         rtnl_unlock();
7803 }
7804
7805 /* end of nic load/unload */
7806
7807 /* ethtool_ops */
7808
7809 /*
7810  * Init service functions
7811  */
7812
7813 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7814 {
7815         switch (func) {
7816         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7817         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7818         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7819         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7820         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7821         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7822         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7823         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7824         default:
7825                 BNX2X_ERR("Unsupported function index: %d\n", func);
7826                 return (u32)(-1);
7827         }
7828 }
7829
7830 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7831 {
7832         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7833
7834         /* Flush all outstanding writes */
7835         mmiowb();
7836
7837         /* Pretend to be function 0 */
7838         REG_WR(bp, reg, 0);
7839         /* Flush the GRC transaction (in the chip) */
7840         new_val = REG_RD(bp, reg);
7841         if (new_val != 0) {
7842                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7843                           new_val);
7844                 BUG();
7845         }
7846
7847         /* From now on we are in the "like-E1" mode */
7848         bnx2x_int_disable(bp);
7849
7850         /* Flush all outstanding writes */
7851         mmiowb();
7852
7853         /* Restore the original function settings */
7854         REG_WR(bp, reg, orig_func);
7855         new_val = REG_RD(bp, reg);
7856         if (new_val != orig_func) {
7857                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7858                           orig_func, new_val);
7859                 BUG();
7860         }
7861 }
7862
7863 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7864 {
7865         if (CHIP_IS_E1H(bp))
7866                 bnx2x_undi_int_disable_e1h(bp, func);
7867         else
7868                 bnx2x_int_disable(bp);
7869 }
7870
7871 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7872 {
7873         u32 val;
7874
7875         /* Check if there is any driver already loaded */
7876         val = REG_RD(bp, MISC_REG_UNPREPARED);
7877         if (val == 0x1) {
7878                 /* Check if it is the UNDI driver
7879                  * UNDI driver initializes CID offset for normal bell to 0x7
7880                  */
7881                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7882                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7883                 if (val == 0x7) {
7884                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7885                         /* save our func */
7886                         int func = BP_FUNC(bp);
7887                         u32 swap_en;
7888                         u32 swap_val;
7889
7890                         /* clear the UNDI indication */
7891                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7892
7893                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7894
7895                         /* try unload UNDI on port 0 */
7896                         bp->func = 0;
7897                         bp->fw_seq =
7898                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7899                                 DRV_MSG_SEQ_NUMBER_MASK);
7900                         reset_code = bnx2x_fw_command(bp, reset_code);
7901
7902                         /* if UNDI is loaded on the other port */
7903                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7904
7905                                 /* send "DONE" for previous unload */
7906                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7907
7908                                 /* unload UNDI on port 1 */
7909                                 bp->func = 1;
7910                                 bp->fw_seq =
7911                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7912                                         DRV_MSG_SEQ_NUMBER_MASK);
7913                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7914
7915                                 bnx2x_fw_command(bp, reset_code);
7916                         }
7917
7918                         /* now it's safe to release the lock */
7919                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7920
7921                         bnx2x_undi_int_disable(bp, func);
7922
7923                         /* close input traffic and wait for it */
7924                         /* Do not rcv packets to BRB */
7925                         REG_WR(bp,
7926                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7927                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7928                         /* Do not direct rcv packets that are not for MCP to
7929                          * the BRB */
7930                         REG_WR(bp,
7931                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7932                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7933                         /* clear AEU */
7934                         REG_WR(bp,
7935                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7936                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7937                         msleep(10);
7938
7939                         /* save NIG port swap info */
7940                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7941                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7942                         /* reset device */
7943                         REG_WR(bp,
7944                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7945                                0xd3ffffff);
7946                         REG_WR(bp,
7947                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7948                                0x1403);
7949                         /* take the NIG out of reset and restore swap values */
7950                         REG_WR(bp,
7951                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7952                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7953                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7954                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7955
7956                         /* send unload done to the MCP */
7957                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7958
7959                         /* restore our func and fw_seq */
7960                         bp->func = func;
7961                         bp->fw_seq =
7962                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7963                                 DRV_MSG_SEQ_NUMBER_MASK);
7964
7965                 } else
7966                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7967         }
7968 }
7969
7970 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7971 {
7972         u32 val, val2, val3, val4, id;
7973         u16 pmc;
7974
7975         /* Get the chip revision id and number. */
7976         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7977         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7978         id = ((val & 0xffff) << 16);
7979         val = REG_RD(bp, MISC_REG_CHIP_REV);
7980         id |= ((val & 0xf) << 12);
7981         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7982         id |= ((val & 0xff) << 4);
7983         val = REG_RD(bp, MISC_REG_BOND_ID);
7984         id |= (val & 0xf);
7985         bp->common.chip_id = id;
7986         bp->link_params.chip_id = bp->common.chip_id;
7987         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
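        /* Example: a chip num of 0x164e with rev 0, metal 0 and bond_id 0
         * packs to chip_id 0x164e0000 (num in bits 16-31, rev 12-15,
         * metal 4-11, bond_id 0-3); values here are illustrative only.
         */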
7988
7989         val = (REG_RD(bp, 0x2874) & 0x55);
7990         if ((bp->common.chip_id & 0x1) ||
7991             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7992                 bp->flags |= ONE_PORT_FLAG;
7993                 BNX2X_DEV_INFO("single port device\n");
7994         }
7995
7996         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7997         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7998                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7999         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8000                        bp->common.flash_size, bp->common.flash_size);
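        /* The NVM_CFG4 field is a shift count: flash_size =
         * NVRAM_1MB_SIZE << field, where NVRAM_1MB_SIZE is 1 Mbit in bytes
         * (0x20000, as assumed from bnx2x.h), so e.g. a field value of 3
         * reports an 8 Mbit (0x100000 byte) flash.
         */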
8001
8002         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8003         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8004         bp->link_params.shmem_base = bp->common.shmem_base;
8005         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8006                        bp->common.shmem_base, bp->common.shmem2_base);
8007
8008         if (!bp->common.shmem_base ||
8009             (bp->common.shmem_base < 0xA0000) ||
8010             (bp->common.shmem_base >= 0xC0000)) {
8011                 BNX2X_DEV_INFO("MCP not active\n");
8012                 bp->flags |= NO_MCP_FLAG;
8013                 return;
8014         }
8015
8016         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8017         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8018                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8019                 BNX2X_ERR("BAD MCP validity signature\n");
8020
8021         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8022         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8023
8024         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8025                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8026                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8027
8028         bp->link_params.feature_config_flags = 0;
8029         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8030         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8031                 bp->link_params.feature_config_flags |=
8032                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8033         else
8034                 bp->link_params.feature_config_flags &=
8035                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8036
8037         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8038         bp->common.bc_ver = val;
8039         BNX2X_DEV_INFO("bc_ver %X\n", val);
8040         if (val < BNX2X_BC_VER) {
8041                 /* for now only warn
8042                  * later we might need to enforce this */
8043                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8044                           " please upgrade BC\n", BNX2X_BC_VER, val);
8045         }
8046         bp->link_params.feature_config_flags |=
8047                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8048                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8049
8050         if (BP_E1HVN(bp) == 0) {
8051                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8052                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8053         } else {
8054                 /* no WOL capability for E1HVN != 0 */
8055                 bp->flags |= NO_WOL_FLAG;
8056         }
8057         BNX2X_DEV_INFO("%sWoL capable\n",
8058                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8059
8060         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8061         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8062         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8063         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8064
8065         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8066                val, val2, val3, val4);
8067 }
8068
8069 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8070                                                     u32 switch_cfg)
8071 {
8072         int port = BP_PORT(bp);
8073         u32 ext_phy_type;
8074
8075         switch (switch_cfg) {
8076         case SWITCH_CFG_1G:
8077                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8078
8079                 ext_phy_type =
8080                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8081                 switch (ext_phy_type) {
8082                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8083                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8084                                        ext_phy_type);
8085
8086                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8087                                                SUPPORTED_10baseT_Full |
8088                                                SUPPORTED_100baseT_Half |
8089                                                SUPPORTED_100baseT_Full |
8090                                                SUPPORTED_1000baseT_Full |
8091                                                SUPPORTED_2500baseX_Full |
8092                                                SUPPORTED_TP |
8093                                                SUPPORTED_FIBRE |
8094                                                SUPPORTED_Autoneg |
8095                                                SUPPORTED_Pause |
8096                                                SUPPORTED_Asym_Pause);
8097                         break;
8098
8099                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8100                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8101                                        ext_phy_type);
8102
8103                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8104                                                SUPPORTED_10baseT_Full |
8105                                                SUPPORTED_100baseT_Half |
8106                                                SUPPORTED_100baseT_Full |
8107                                                SUPPORTED_1000baseT_Full |
8108                                                SUPPORTED_TP |
8109                                                SUPPORTED_FIBRE |
8110                                                SUPPORTED_Autoneg |
8111                                                SUPPORTED_Pause |
8112                                                SUPPORTED_Asym_Pause);
8113                         break;
8114
8115                 default:
8116                         BNX2X_ERR("NVRAM config error. "
8117                                   "BAD SerDes ext_phy_config 0x%x\n",
8118                                   bp->link_params.ext_phy_config);
8119                         return;
8120                 }
8121
8122                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8123                                            port*0x10);
8124                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8125                 break;
8126
8127         case SWITCH_CFG_10G:
8128                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8129
8130                 ext_phy_type =
8131                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8132                 switch (ext_phy_type) {
8133                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8134                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8135                                        ext_phy_type);
8136
8137                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8138                                                SUPPORTED_10baseT_Full |
8139                                                SUPPORTED_100baseT_Half |
8140                                                SUPPORTED_100baseT_Full |
8141                                                SUPPORTED_1000baseT_Full |
8142                                                SUPPORTED_2500baseX_Full |
8143                                                SUPPORTED_10000baseT_Full |
8144                                                SUPPORTED_TP |
8145                                                SUPPORTED_FIBRE |
8146                                                SUPPORTED_Autoneg |
8147                                                SUPPORTED_Pause |
8148                                                SUPPORTED_Asym_Pause);
8149                         break;
8150
8151                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8152                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8153                                        ext_phy_type);
8154
8155                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8156                                                SUPPORTED_1000baseT_Full |
8157                                                SUPPORTED_FIBRE |
8158                                                SUPPORTED_Autoneg |
8159                                                SUPPORTED_Pause |
8160                                                SUPPORTED_Asym_Pause);
8161                         break;
8162
8163                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8164                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8165                                        ext_phy_type);
8166
8167                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8168                                                SUPPORTED_2500baseX_Full |
8169                                                SUPPORTED_1000baseT_Full |
8170                                                SUPPORTED_FIBRE |
8171                                                SUPPORTED_Autoneg |
8172                                                SUPPORTED_Pause |
8173                                                SUPPORTED_Asym_Pause);
8174                         break;
8175
8176                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8177                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8178                                        ext_phy_type);
8179
8180                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8181                                                SUPPORTED_FIBRE |
8182                                                SUPPORTED_Pause |
8183                                                SUPPORTED_Asym_Pause);
8184                         break;
8185
8186                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8187                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8188                                        ext_phy_type);
8189
8190                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8191                                                SUPPORTED_1000baseT_Full |
8192                                                SUPPORTED_FIBRE |
8193                                                SUPPORTED_Pause |
8194                                                SUPPORTED_Asym_Pause);
8195                         break;
8196
8197                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8198                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8199                                        ext_phy_type);
8200
8201                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8202                                                SUPPORTED_1000baseT_Full |
8203                                                SUPPORTED_Autoneg |
8204                                                SUPPORTED_FIBRE |
8205                                                SUPPORTED_Pause |
8206                                                SUPPORTED_Asym_Pause);
8207                         break;
8208
8209                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8210                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8211                                        ext_phy_type);
8212
8213                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8214                                                SUPPORTED_1000baseT_Full |
8215                                                SUPPORTED_Autoneg |
8216                                                SUPPORTED_FIBRE |
8217                                                SUPPORTED_Pause |
8218                                                SUPPORTED_Asym_Pause);
8219                         break;
8220
8221                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8222                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8223                                        ext_phy_type);
8224
8225                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8226                                                SUPPORTED_TP |
8227                                                SUPPORTED_Autoneg |
8228                                                SUPPORTED_Pause |
8229                                                SUPPORTED_Asym_Pause);
8230                         break;
8231
8232                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8233                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8234                                        ext_phy_type);
8235
8236                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8237                                                SUPPORTED_10baseT_Full |
8238                                                SUPPORTED_100baseT_Half |
8239                                                SUPPORTED_100baseT_Full |
8240                                                SUPPORTED_1000baseT_Full |
8241                                                SUPPORTED_10000baseT_Full |
8242                                                SUPPORTED_TP |
8243                                                SUPPORTED_Autoneg |
8244                                                SUPPORTED_Pause |
8245                                                SUPPORTED_Asym_Pause);
8246                         break;
8247
8248                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8249                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8250                                   bp->link_params.ext_phy_config);
8251                         break;
8252
8253                 default:
8254                         BNX2X_ERR("NVRAM config error. "
8255                                   "BAD XGXS ext_phy_config 0x%x\n",
8256                                   bp->link_params.ext_phy_config);
8257                         return;
8258                 }
8259
8260                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8261                                            port*0x18);
8262                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8263
8264                 break;
8265
8266         default:
8267                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8268                           bp->port.link_config);
8269                 return;
8270         }
8271         bp->link_params.phy_addr = bp->port.phy_addr;
8272
8273         /* mask what we support according to speed_cap_mask */
8274         if (!(bp->link_params.speed_cap_mask &
8275                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8276                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8277
8278         if (!(bp->link_params.speed_cap_mask &
8279                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8280                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8281
8282         if (!(bp->link_params.speed_cap_mask &
8283                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8284                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8285
8286         if (!(bp->link_params.speed_cap_mask &
8287                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8288                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8289
8290         if (!(bp->link_params.speed_cap_mask &
8291                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8292                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8293                                         SUPPORTED_1000baseT_Full);
8294
8295         if (!(bp->link_params.speed_cap_mask &
8296                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8297                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8298
8299         if (!(bp->link_params.speed_cap_mask &
8300                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8301                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8302
8303         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8304 }
8305
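/*
 * Example of the speed_cap_mask filtering performed by
 * bnx2x_link_settings_supported() (illustrative values only): a PHY
 * that reports both 1G and 10G, paired with an NVRAM mask that only
 * allows 10G, ends up advertising 10G alone:
 *
 *      supported = SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full;
 *      if (!(speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
 *              supported &= ~(SUPPORTED_1000baseT_Half |
 *                             SUPPORTED_1000baseT_Full);
 *      -> supported == SUPPORTED_10000baseT_Full
 */
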
8306 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8307 {
8308         bp->link_params.req_duplex = DUPLEX_FULL;
8309
8310         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8311         case PORT_FEATURE_LINK_SPEED_AUTO:
8312                 if (bp->port.supported & SUPPORTED_Autoneg) {
8313                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8314                         bp->port.advertising = bp->port.supported;
8315                 } else {
8316                         u32 ext_phy_type =
8317                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8318
8319                         if ((ext_phy_type ==
8320                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8321                             (ext_phy_type ==
8322                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8323                                 /* force 10G, no AN */
8324                                 bp->link_params.req_line_speed = SPEED_10000;
8325                                 bp->port.advertising =
8326                                                 (ADVERTISED_10000baseT_Full |
8327                                                  ADVERTISED_FIBRE);
8328                                 break;
8329                         }
8330                         BNX2X_ERR("NVRAM config error. "
8331                                   "Invalid link_config 0x%x"
8332                                   "  Autoneg not supported\n",
8333                                   bp->port.link_config);
8334                         return;
8335                 }
8336                 break;
8337
8338         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8339                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8340                         bp->link_params.req_line_speed = SPEED_10;
8341                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8342                                                 ADVERTISED_TP);
8343                 } else {
8344                         BNX2X_ERR("NVRAM config error. "
8345                                   "Invalid link_config 0x%x"
8346                                   "  speed_cap_mask 0x%x\n",
8347                                   bp->port.link_config,
8348                                   bp->link_params.speed_cap_mask);
8349                         return;
8350                 }
8351                 break;
8352
8353         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8354                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8355                         bp->link_params.req_line_speed = SPEED_10;
8356                         bp->link_params.req_duplex = DUPLEX_HALF;
8357                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8358                                                 ADVERTISED_TP);
8359                 } else {
8360                         BNX2X_ERR("NVRAM config error. "
8361                                   "Invalid link_config 0x%x"
8362                                   "  speed_cap_mask 0x%x\n",
8363                                   bp->port.link_config,
8364                                   bp->link_params.speed_cap_mask);
8365                         return;
8366                 }
8367                 break;
8368
8369         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8370                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8371                         bp->link_params.req_line_speed = SPEED_100;
8372                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8373                                                 ADVERTISED_TP);
8374                 } else {
8375                         BNX2X_ERR("NVRAM config error. "
8376                                   "Invalid link_config 0x%x"
8377                                   "  speed_cap_mask 0x%x\n",
8378                                   bp->port.link_config,
8379                                   bp->link_params.speed_cap_mask);
8380                         return;
8381                 }
8382                 break;
8383
8384         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8385                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8386                         bp->link_params.req_line_speed = SPEED_100;
8387                         bp->link_params.req_duplex = DUPLEX_HALF;
8388                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8389                                                 ADVERTISED_TP);
8390                 } else {
8391                         BNX2X_ERR("NVRAM config error. "
8392                                   "Invalid link_config 0x%x"
8393                                   "  speed_cap_mask 0x%x\n",
8394                                   bp->port.link_config,
8395                                   bp->link_params.speed_cap_mask);
8396                         return;
8397                 }
8398                 break;
8399
8400         case PORT_FEATURE_LINK_SPEED_1G:
8401                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8402                         bp->link_params.req_line_speed = SPEED_1000;
8403                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8404                                                 ADVERTISED_TP);
8405                 } else {
8406                         BNX2X_ERR("NVRAM config error. "
8407                                   "Invalid link_config 0x%x"
8408                                   "  speed_cap_mask 0x%x\n",
8409                                   bp->port.link_config,
8410                                   bp->link_params.speed_cap_mask);
8411                         return;
8412                 }
8413                 break;
8414
8415         case PORT_FEATURE_LINK_SPEED_2_5G:
8416                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8417                         bp->link_params.req_line_speed = SPEED_2500;
8418                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8419                                                 ADVERTISED_TP);
8420                 } else {
8421                         BNX2X_ERR("NVRAM config error. "
8422                                   "Invalid link_config 0x%x"
8423                                   "  speed_cap_mask 0x%x\n",
8424                                   bp->port.link_config,
8425                                   bp->link_params.speed_cap_mask);
8426                         return;
8427                 }
8428                 break;
8429
8430         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8431         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8432         case PORT_FEATURE_LINK_SPEED_10G_KR:
8433                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8434                         bp->link_params.req_line_speed = SPEED_10000;
8435                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8436                                                 ADVERTISED_FIBRE);
8437                 } else {
8438                         BNX2X_ERR("NVRAM config error. "
8439                                   "Invalid link_config 0x%x"
8440                                   "  speed_cap_mask 0x%x\n",
8441                                   bp->port.link_config,
8442                                   bp->link_params.speed_cap_mask);
8443                         return;
8444                 }
8445                 break;
8446
8447         default:
8448                 BNX2X_ERR("NVRAM config error. "
8449                           "BAD link speed link_config 0x%x\n",
8450                           bp->port.link_config);
8451                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8452                 bp->port.advertising = bp->port.supported;
8453                 break;
8454         }
8455
8456         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8457                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8458         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8459             !(bp->port.supported & SUPPORTED_Autoneg))
8460                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8461
8462         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8463                        "  advertising 0x%x\n",
8464                        bp->link_params.req_line_speed,
8465                        bp->link_params.req_duplex,
8466                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8467 }
8468
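/*
 * Worked example for bnx2x_link_settings_requested() (illustrative):
 * an NVRAM link_config of PORT_FEATURE_LINK_SPEED_1G on a port whose
 * supported mask includes SUPPORTED_1000baseT_Full produces
 *
 *      bp->link_params.req_line_speed = SPEED_1000;
 *      bp->link_params.req_duplex     = DUPLEX_FULL;
 *      bp->port.advertising           = ADVERTISED_1000baseT_Full |
 *                                       ADVERTISED_TP;
 *
 * while a requested speed the port cannot do logs an NVRAM config
 * error and returns without changing the request.
 */
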
8469 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8470 {
8471         int port = BP_PORT(bp);
8472         u32 val, val2;
8473         u32 config;
8474         u16 i;
8475         u32 ext_phy_type;
8476
8477         bp->link_params.bp = bp;
8478         bp->link_params.port = port;
8479
8480         bp->link_params.lane_config =
8481                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8482         bp->link_params.ext_phy_config =
8483                 SHMEM_RD(bp,
8484                          dev_info.port_hw_config[port].external_phy_config);
8485         /* BCM8727_NOC => BCM8727 with no over-current support */
8486         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8487             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8488                 bp->link_params.ext_phy_config &=
8489                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8490                 bp->link_params.ext_phy_config |=
8491                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8492                 bp->link_params.feature_config_flags |=
8493                         FEATURE_CONFIG_BCM8727_NOC;
8494         }
8495
8496         bp->link_params.speed_cap_mask =
8497                 SHMEM_RD(bp,
8498                          dev_info.port_hw_config[port].speed_capability_mask);
8499
8500         bp->port.link_config =
8501                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8502
8503         /* Get the rx and tx XGXS config for all 4 lanes */
8504         for (i = 0; i < 2; i++) {
8505                 val = SHMEM_RD(bp,
8506                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8507                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8508                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8509
8510                 val = SHMEM_RD(bp,
8511                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8512                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8513                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8514         }
8515
8516         /* If the device is capable of WoL, set the default state according
8517          * to the HW
8518          */
8519         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8520         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8521                    (config & PORT_FEATURE_WOL_ENABLED));
8522
8523         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8524                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8525                        bp->link_params.lane_config,
8526                        bp->link_params.ext_phy_config,
8527                        bp->link_params.speed_cap_mask, bp->port.link_config);
8528
8529         bp->link_params.switch_cfg |= (bp->port.link_config &
8530                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8531         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8532
8533         bnx2x_link_settings_requested(bp);
8534
8535         /*
8536          * If connected directly, work with the internal PHY, otherwise, work
8537          * with the external PHY
8538          */
8539         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8540         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8541                 bp->mdio.prtad = bp->link_params.phy_addr;
8542
8543         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8544                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8545                 bp->mdio.prtad =
8546                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8547
8548         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8549         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8550         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8551         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8552         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8553         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8554         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8555         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8556         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8557         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8558 }
8559
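/*
 * MAC assembly sketch: shmem keeps the station address as two words,
 * mac_upper carrying bytes 0-1 in its low 16 bits and mac_lower
 * carrying bytes 2-5.  For the (hypothetical) address
 * 00:10:18:aa:bb:cc the reads above would yield
 *
 *      val2 = 0x00000010   ->  dev_addr[0..1] = 00:10
 *      val  = 0x18aabbcc   ->  dev_addr[2..5] = 18:aa:bb:cc
 */
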
8560 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8561 {
8562         int func = BP_FUNC(bp);
8563         u32 val, val2;
8564         int rc = 0;
8565
8566         bnx2x_get_common_hwinfo(bp);
8567
8568         bp->e1hov = 0;
8569         bp->e1hmf = 0;
8570         if (CHIP_IS_E1H(bp)) {
8571                 bp->mf_config =
8572                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8573
8574                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8575                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8576                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8577                         bp->e1hmf = 1;
8578                 BNX2X_DEV_INFO("%s function mode\n",
8579                                IS_E1HMF(bp) ? "multi" : "single");
8580
8581                 if (IS_E1HMF(bp)) {
8582                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8583                                                                 e1hov_tag) &
8584                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8585                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8586                                 bp->e1hov = val;
8587                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8588                                                "(0x%04x)\n",
8589                                                func, bp->e1hov, bp->e1hov);
8590                         } else {
8591                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8592                                           "  aborting\n", func);
8593                                 rc = -EPERM;
8594                         }
8595                 } else {
8596                         if (BP_E1HVN(bp)) {
8597                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8598                                           "  aborting\n", BP_E1HVN(bp));
8599                                 rc = -EPERM;
8600                         }
8601                 }
8602         }
8603
8604         if (!BP_NOMCP(bp)) {
8605                 bnx2x_get_port_hwinfo(bp);
8606
8607                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8608                               DRV_MSG_SEQ_NUMBER_MASK);
8609                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8610         }
8611
8612         if (IS_E1HMF(bp)) {
8613                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8614                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8615                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8616                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8617                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8618                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8619                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8620                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8621                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8622                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8623                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8624                                ETH_ALEN);
8625                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8626                                ETH_ALEN);
8627                 }
8628
8629                 return rc;
8630         }
8631
8632         if (BP_NOMCP(bp)) {
8633                 /* only supposed to happen on emulation/FPGA */
8634                 BNX2X_ERR("warning: random MAC workaround active\n");
8635                 random_ether_addr(bp->dev->dev_addr);
8636                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8637         }
8638
8639         return rc;
8640 }
8641
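/*
 * E1HOV sketch: in E1H multi-function mode each function is expected
 * to carry an outer-VLAN tag in mf_cfg, and the
 * FUNC_MF_CFG_E1HOV_TAG_DEFAULT sentinel means "no tag assigned",
 * hence the check above (illustrative):
 *
 *      val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
 *            FUNC_MF_CFG_E1HOV_TAG_MASK;
 *      if (val == FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
 *              rc = -EPERM;    a multi-function port needs a tag
 */
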
8642 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8643 {
8644         int func = BP_FUNC(bp);
8645         int timer_interval;
8646         int rc;
8647
8648         /* Disable interrupt handling until HW is initialized */
8649         atomic_set(&bp->intr_sem, 1);
8650         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8651
8652         mutex_init(&bp->port.phy_mutex);
8653
8654         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8655         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8656
8657         rc = bnx2x_get_hwinfo(bp);
8658
8659         /* need to reset chip if undi was active */
8660         if (!BP_NOMCP(bp))
8661                 bnx2x_undi_unload(bp);
8662
8663         if (CHIP_REV_IS_FPGA(bp))
8664                 printk(KERN_ERR PFX "FPGA detected\n");
8665
8666         if (BP_NOMCP(bp) && (func == 0))
8667                 printk(KERN_ERR PFX
8668                        "MCP disabled, must load devices in order!\n");
8669
8670         /* Set multi queue mode */
8671         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8672             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8673                 printk(KERN_ERR PFX
8674                       "Multi disabled since int_mode requested is not MSI-X\n");
8675                 multi_mode = ETH_RSS_MODE_DISABLED;
8676         }
8677         bp->multi_mode = multi_mode;
8678
8679
8680         /* Set TPA flags */
8681         if (disable_tpa) {
8682                 bp->flags &= ~TPA_ENABLE_FLAG;
8683                 bp->dev->features &= ~NETIF_F_LRO;
8684         } else {
8685                 bp->flags |= TPA_ENABLE_FLAG;
8686                 bp->dev->features |= NETIF_F_LRO;
8687         }
8688
8689         if (CHIP_IS_E1(bp))
8690                 bp->dropless_fc = 0;
8691         else
8692                 bp->dropless_fc = dropless_fc;
8693
8694         bp->mrrs = mrrs;
8695
8696         bp->tx_ring_size = MAX_TX_AVAIL;
8697         bp->rx_ring_size = MAX_RX_AVAIL;
8698
8699         bp->rx_csum = 1;
8700
8701         bp->tx_ticks = 50;
8702         bp->rx_ticks = 25;
8703
8704         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8705         bp->current_interval = (poll ? poll : timer_interval);
8706
8707         init_timer(&bp->timer);
8708         bp->timer.expires = jiffies + bp->current_interval;
8709         bp->timer.data = (unsigned long) bp;
8710         bp->timer.function = bnx2x_timer;
8711
8712         return rc;
8713 }
8714
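/*
 * The timer is only primed here, not started.  A minimal sketch of
 * how it is armed later (the handler re-arms itself the same way, as
 * bnx2x_timer() does):
 *
 *      mod_timer(&bp->timer, jiffies + bp->current_interval);
 */
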
8715 /*
8716  * ethtool service functions
8717  */
8718
8719 /* All ethtool functions called with rtnl_lock */
8720
8721 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8722 {
8723         struct bnx2x *bp = netdev_priv(dev);
8724
8725         cmd->supported = bp->port.supported;
8726         cmd->advertising = bp->port.advertising;
8727
8728         if (netif_carrier_ok(dev)) {
8729                 cmd->speed = bp->link_vars.line_speed;
8730                 cmd->duplex = bp->link_vars.duplex;
8731         } else {
8732                 cmd->speed = bp->link_params.req_line_speed;
8733                 cmd->duplex = bp->link_params.req_duplex;
8734         }
8735         if (IS_E1HMF(bp)) {
8736                 u16 vn_max_rate;
8737
8738                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8739                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8740                 if (vn_max_rate < cmd->speed)
8741                         cmd->speed = vn_max_rate;
8742         }
8743
8744         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8745                 u32 ext_phy_type =
8746                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8747
8748                 switch (ext_phy_type) {
8749                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8750                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8751                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8752                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8753                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8754                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8755                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8756                         cmd->port = PORT_FIBRE;
8757                         break;
8758
8759                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8760                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8761                         cmd->port = PORT_TP;
8762                         break;
8763
8764                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8765                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8766                                   bp->link_params.ext_phy_config);
8767                         break;
8768
8769                 default:
8770                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8771                            bp->link_params.ext_phy_config);
8772                         break;
8773                 }
8774         } else
8775                 cmd->port = PORT_TP;
8776
8777         cmd->phy_address = bp->mdio.prtad;
8778         cmd->transceiver = XCVR_INTERNAL;
8779
8780         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8781                 cmd->autoneg = AUTONEG_ENABLE;
8782         else
8783                 cmd->autoneg = AUTONEG_DISABLE;
8784
8785         cmd->maxtxpkt = 0;
8786         cmd->maxrxpkt = 0;
8787
8788         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8789            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8790            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8791            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8792            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8793            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8794            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8795
8796         return 0;
8797 }
8798
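/*
 * Bandwidth-clamp example for the E1HMF branch above (illustrative):
 * the per-function maximum bandwidth field is in 100 Mbps units, so a
 * FUNC_MF_CFG_MAX_BW field of 25 caps a 10G link at
 *
 *      vn_max_rate = 25 * 100;                    2500 Mbps
 *      cmd->speed  = min(cmd->speed, 2500);       reported to ethtool
 */
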
8799 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8800 {
8801         struct bnx2x *bp = netdev_priv(dev);
8802         u32 advertising;
8803
8804         if (IS_E1HMF(bp))
8805                 return 0;
8806
8807         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8808            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8809            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8810            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8811            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8812            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8813            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8814
8815         if (cmd->autoneg == AUTONEG_ENABLE) {
8816                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8817                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8818                         return -EINVAL;
8819                 }
8820
8821                 /* advertise the requested speed and duplex if supported */
8822                 cmd->advertising &= bp->port.supported;
8823
8824                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8825                 bp->link_params.req_duplex = DUPLEX_FULL;
8826                 bp->port.advertising |= (ADVERTISED_Autoneg |
8827                                          cmd->advertising);
8828
8829         } else { /* forced speed */
8830                 /* advertise the requested speed and duplex if supported */
8831                 switch (cmd->speed) {
8832                 case SPEED_10:
8833                         if (cmd->duplex == DUPLEX_FULL) {
8834                                 if (!(bp->port.supported &
8835                                       SUPPORTED_10baseT_Full)) {
8836                                         DP(NETIF_MSG_LINK,
8837                                            "10M full not supported\n");
8838                                         return -EINVAL;
8839                                 }
8840
8841                                 advertising = (ADVERTISED_10baseT_Full |
8842                                                ADVERTISED_TP);
8843                         } else {
8844                                 if (!(bp->port.supported &
8845                                       SUPPORTED_10baseT_Half)) {
8846                                         DP(NETIF_MSG_LINK,
8847                                            "10M half not supported\n");
8848                                         return -EINVAL;
8849                                 }
8850
8851                                 advertising = (ADVERTISED_10baseT_Half |
8852                                                ADVERTISED_TP);
8853                         }
8854                         break;
8855
8856                 case SPEED_100:
8857                         if (cmd->duplex == DUPLEX_FULL) {
8858                                 if (!(bp->port.supported &
8859                                                 SUPPORTED_100baseT_Full)) {
8860                                         DP(NETIF_MSG_LINK,
8861                                            "100M full not supported\n");
8862                                         return -EINVAL;
8863                                 }
8864
8865                                 advertising = (ADVERTISED_100baseT_Full |
8866                                                ADVERTISED_TP);
8867                         } else {
8868                                 if (!(bp->port.supported &
8869                                                 SUPPORTED_100baseT_Half)) {
8870                                         DP(NETIF_MSG_LINK,
8871                                            "100M half not supported\n");
8872                                         return -EINVAL;
8873                                 }
8874
8875                                 advertising = (ADVERTISED_100baseT_Half |
8876                                                ADVERTISED_TP);
8877                         }
8878                         break;
8879
8880                 case SPEED_1000:
8881                         if (cmd->duplex != DUPLEX_FULL) {
8882                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8883                                 return -EINVAL;
8884                         }
8885
8886                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8887                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8888                                 return -EINVAL;
8889                         }
8890
8891                         advertising = (ADVERTISED_1000baseT_Full |
8892                                        ADVERTISED_TP);
8893                         break;
8894
8895                 case SPEED_2500:
8896                         if (cmd->duplex != DUPLEX_FULL) {
8897                                 DP(NETIF_MSG_LINK,
8898                                    "2.5G half not supported\n");
8899                                 return -EINVAL;
8900                         }
8901
8902                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8903                                 DP(NETIF_MSG_LINK,
8904                                    "2.5G full not supported\n");
8905                                 return -EINVAL;
8906                         }
8907
8908                         advertising = (ADVERTISED_2500baseX_Full |
8909                                        ADVERTISED_TP);
8910                         break;
8911
8912                 case SPEED_10000:
8913                         if (cmd->duplex != DUPLEX_FULL) {
8914                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8915                                 return -EINVAL;
8916                         }
8917
8918                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8919                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8920                                 return -EINVAL;
8921                         }
8922
8923                         advertising = (ADVERTISED_10000baseT_Full |
8924                                        ADVERTISED_FIBRE);
8925                         break;
8926
8927                 default:
8928                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8929                         return -EINVAL;
8930                 }
8931
8932                 bp->link_params.req_line_speed = cmd->speed;
8933                 bp->link_params.req_duplex = cmd->duplex;
8934                 bp->port.advertising = advertising;
8935         }
8936
8937         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8938            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8939            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8940            bp->port.advertising);
8941
8942         if (netif_running(dev)) {
8943                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8944                 bnx2x_link_set(bp);
8945         }
8946
8947         return 0;
8948 }
8949
8950 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8951 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8952
8953 static int bnx2x_get_regs_len(struct net_device *dev)
8954 {
8955         struct bnx2x *bp = netdev_priv(dev);
8956         int regdump_len = 0;
8957         int i;
8958
8959         if (CHIP_IS_E1(bp)) {
8960                 for (i = 0; i < REGS_COUNT; i++)
8961                         if (IS_E1_ONLINE(reg_addrs[i].info))
8962                                 regdump_len += reg_addrs[i].size;
8963
8964                 for (i = 0; i < WREGS_COUNT_E1; i++)
8965                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8966                                 regdump_len += wreg_addrs_e1[i].size *
8967                                         (1 + wreg_addrs_e1[i].read_regs_count);
8968
8969         } else { /* E1H */
8970                 for (i = 0; i < REGS_COUNT; i++)
8971                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8972                                 regdump_len += reg_addrs[i].size;
8973
8974                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8975                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8976                                 regdump_len += wreg_addrs_e1h[i].size *
8977                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8978         }
8979         regdump_len *= 4;
8980         regdump_len += sizeof(struct dump_hdr);
8981
8982         return regdump_len;
8983 }
8984
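/*
 * The loops above count 32-bit registers, hence the final multiply by
 * four to convert dwords to bytes; for N online dwords the length
 * reported to the ethtool core is
 *
 *      regdump_len = N * 4 + sizeof(struct dump_hdr);
 */
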
8985 static void bnx2x_get_regs(struct net_device *dev,
8986                            struct ethtool_regs *regs, void *_p)
8987 {
8988         u32 *p = _p, i, j;
8989         struct bnx2x *bp = netdev_priv(dev);
8990         struct dump_hdr dump_hdr = {0};
8991
8992         regs->version = 0;
8993         memset(p, 0, regs->len);
8994
8995         if (!netif_running(bp->dev))
8996                 return;
8997
8998         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8999         dump_hdr.dump_sign = dump_sign_all;
9000         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9001         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9002         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9003         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9004         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9005
9006         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9007         p += dump_hdr.hdr_size + 1;
9008
9009         if (CHIP_IS_E1(bp)) {
9010                 for (i = 0; i < REGS_COUNT; i++)
9011                         if (IS_E1_ONLINE(reg_addrs[i].info))
9012                                 for (j = 0; j < reg_addrs[i].size; j++)
9013                                         *p++ = REG_RD(bp,
9014                                                       reg_addrs[i].addr + j*4);
9015
9016         } else { /* E1H */
9017                 for (i = 0; i < REGS_COUNT; i++)
9018                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9019                                 for (j = 0; j < reg_addrs[i].size; j++)
9020                                         *p++ = REG_RD(bp,
9021                                                       reg_addrs[i].addr + j*4);
9022         }
9023 }
9024
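/*
 * Note that the ethtool core sizes regs->len via bnx2x_get_regs_len()
 * before calling in here, so the header plus every online register
 * dword written through 'p' is guaranteed to fit in the buffer.
 */
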
9025 #define PHY_FW_VER_LEN                  10
9026
9027 static void bnx2x_get_drvinfo(struct net_device *dev,
9028                               struct ethtool_drvinfo *info)
9029 {
9030         struct bnx2x *bp = netdev_priv(dev);
9031         u8 phy_fw_ver[PHY_FW_VER_LEN];
9032
9033         strcpy(info->driver, DRV_MODULE_NAME);
9034         strcpy(info->version, DRV_MODULE_VERSION);
9035
9036         phy_fw_ver[0] = '\0';
9037         if (bp->port.pmf) {
9038                 bnx2x_acquire_phy_lock(bp);
9039                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9040                                              (bp->state != BNX2X_STATE_CLOSED),
9041                                              phy_fw_ver, PHY_FW_VER_LEN);
9042                 bnx2x_release_phy_lock(bp);
9043         }
9044
9045         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9046                  (bp->common.bc_ver & 0xff0000) >> 16,
9047                  (bp->common.bc_ver & 0xff00) >> 8,
9048                  (bp->common.bc_ver & 0xff),
9049                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9050         strcpy(info->bus_info, pci_name(bp->pdev));
9051         info->n_stats = BNX2X_NUM_STATS;
9052         info->testinfo_len = BNX2X_NUM_TESTS;
9053         info->eedump_len = bp->common.flash_size;
9054         info->regdump_len = bnx2x_get_regs_len(dev);
9055 }
9056
9057 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9058 {
9059         struct bnx2x *bp = netdev_priv(dev);
9060
9061         if (bp->flags & NO_WOL_FLAG) {
9062                 wol->supported = 0;
9063                 wol->wolopts = 0;
9064         } else {
9065                 wol->supported = WAKE_MAGIC;
9066                 if (bp->wol)
9067                         wol->wolopts = WAKE_MAGIC;
9068                 else
9069                         wol->wolopts = 0;
9070         }
9071         memset(&wol->sopass, 0, sizeof(wol->sopass));
9072 }
9073
9074 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9075 {
9076         struct bnx2x *bp = netdev_priv(dev);
9077
9078         if (wol->wolopts & ~WAKE_MAGIC)
9079                 return -EINVAL;
9080
9081         if (wol->wolopts & WAKE_MAGIC) {
9082                 if (bp->flags & NO_WOL_FLAG)
9083                         return -EINVAL;
9084
9085                 bp->wol = 1;
9086         } else
9087                 bp->wol = 0;
9088
9089         return 0;
9090 }
9091
9092 static u32 bnx2x_get_msglevel(struct net_device *dev)
9093 {
9094         struct bnx2x *bp = netdev_priv(dev);
9095
9096         return bp->msglevel;
9097 }
9098
9099 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9100 {
9101         struct bnx2x *bp = netdev_priv(dev);
9102
9103         if (capable(CAP_NET_ADMIN))
9104                 bp->msglevel = level;
9105 }
9106
9107 static int bnx2x_nway_reset(struct net_device *dev)
9108 {
9109         struct bnx2x *bp = netdev_priv(dev);
9110
9111         if (!bp->port.pmf)
9112                 return 0;
9113
9114         if (netif_running(dev)) {
9115                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9116                 bnx2x_link_set(bp);
9117         }
9118
9119         return 0;
9120 }
9121
9122 static u32 bnx2x_get_link(struct net_device *dev)
9124 {
9125         struct bnx2x *bp = netdev_priv(dev);
9126
9127         return bp->link_vars.link_up;
9128 }
9129
9130 static int bnx2x_get_eeprom_len(struct net_device *dev)
9131 {
9132         struct bnx2x *bp = netdev_priv(dev);
9133
9134         return bp->common.flash_size;
9135 }
9136
9137 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9138 {
9139         int port = BP_PORT(bp);
9140         int count, i;
9141         u32 val = 0;
9142
9143         /* adjust timeout for emulation/FPGA */
9144         count = NVRAM_TIMEOUT_COUNT;
9145         if (CHIP_REV_IS_SLOW(bp))
9146                 count *= 100;
9147
9148         /* request access to nvram interface */
9149         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9150                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9151
9152         for (i = 0; i < count*10; i++) {
9153                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9154                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9155                         break;
9156
9157                 udelay(5);
9158         }
9159
9160         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9161                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9162                 return -EBUSY;
9163         }
9164
9165         return 0;
9166 }
9167
9168 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9169 {
9170         int port = BP_PORT(bp);
9171         int count, i;
9172         u32 val = 0;
9173
9174         /* adjust timeout for emulation/FPGA */
9175         count = NVRAM_TIMEOUT_COUNT;
9176         if (CHIP_REV_IS_SLOW(bp))
9177                 count *= 100;
9178
9179         /* relinquish nvram interface */
9180         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9181                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9182
9183         for (i = 0; i < count*10; i++) {
9184                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9185                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9186                         break;
9187
9188                 udelay(5);
9189         }
9190
9191         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9192                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9193                 return -EBUSY;
9194         }
9195
9196         return 0;
9197 }
9198
9199 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9200 {
9201         u32 val;
9202
9203         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9204
9205         /* enable both bits, even on read */
9206         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9207                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9208                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9209 }
9210
9211 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9212 {
9213         u32 val;
9214
9215         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9216
9217         /* disable both bits, even after read */
9218         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9219                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9220                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9221 }
9222
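/*
 * Every NVRAM transaction below uses the same bracket around the
 * actual dword accesses (sketch):
 *
 *      rc = bnx2x_acquire_nvram_lock(bp);      per-port HW arbitration
 *      if (rc)
 *              return rc;
 *      bnx2x_enable_nvram_access(bp);
 *      ... bnx2x_nvram_read_dword() / bnx2x_nvram_write_dword() ...
 *      bnx2x_disable_nvram_access(bp);
 *      bnx2x_release_nvram_lock(bp);
 */
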
9223 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9224                                   u32 cmd_flags)
9225 {
9226         int count, i, rc;
9227         u32 val;
9228
9229         /* build the command word */
9230         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9231
9232         /* need to clear DONE bit separately */
9233         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9234
9235         /* address of the NVRAM to read from */
9236         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9237                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9238
9239         /* issue a read command */
9240         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9241
9242         /* adjust timeout for emulation/FPGA */
9243         count = NVRAM_TIMEOUT_COUNT;
9244         if (CHIP_REV_IS_SLOW(bp))
9245                 count *= 100;
9246
9247         /* wait for completion */
9248         *ret_val = 0;
9249         rc = -EBUSY;
9250         for (i = 0; i < count; i++) {
9251                 udelay(5);
9252                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9253
9254                 if (val & MCPR_NVM_COMMAND_DONE) {
9255                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9256                         /* we read nvram data in cpu order
9257                          * but ethtool sees it as an array of bytes;
9258                          * converting to big-endian does the work */
9259                         *ret_val = cpu_to_be32(val);
9260                         rc = 0;
9261                         break;
9262                 }
9263         }
9264
9265         return rc;
9266 }
9267
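/*
 * Endianness example (assuming, as the comment in
 * bnx2x_nvram_read_dword() implies, that the first flash byte lands in
 * the most significant bits of MCPR_NVM_READ): for the ASCII bytes
 * 'B' 'C' 'M' '5'
 *
 *      val = 0x42434d35;               cpu-order value
 *      *ret_val = cpu_to_be32(val);    stored as 42 43 4d 35 on any CPU
 */
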
9268 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9269                             int buf_size)
9270 {
9271         int rc;
9272         u32 cmd_flags;
9273         __be32 val;
9274
9275         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9276                 DP(BNX2X_MSG_NVM,
9277                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9278                    offset, buf_size);
9279                 return -EINVAL;
9280         }
9281
9282         if (offset + buf_size > bp->common.flash_size) {
9283                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9284                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9285                    offset, buf_size, bp->common.flash_size);
9286                 return -EINVAL;
9287         }
9288
9289         /* request access to nvram interface */
9290         rc = bnx2x_acquire_nvram_lock(bp);
9291         if (rc)
9292                 return rc;
9293
9294         /* enable access to nvram interface */
9295         bnx2x_enable_nvram_access(bp);
9296
9297         /* read the first word(s) */
9298         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9299         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9300                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9301                 memcpy(ret_buf, &val, 4);
9302
9303                 /* advance to the next dword */
9304                 offset += sizeof(u32);
9305                 ret_buf += sizeof(u32);
9306                 buf_size -= sizeof(u32);
9307                 cmd_flags = 0;
9308         }
9309
9310         if (rc == 0) {
9311                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9312                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9313                 memcpy(ret_buf, &val, 4);
9314         }
9315
9316         /* disable access to nvram interface */
9317         bnx2x_disable_nvram_access(bp);
9318         bnx2x_release_nvram_lock(bp);
9319
9320         return rc;
9321 }
9322
9323 static int bnx2x_get_eeprom(struct net_device *dev,
9324                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9325 {
9326         struct bnx2x *bp = netdev_priv(dev);
9327         int rc;
9328
9329         if (!netif_running(dev))
9330                 return -EAGAIN;
9331
9332         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9333            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9334            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9335            eeprom->len, eeprom->len);
9336
9337         /* parameters already validated in ethtool_get_eeprom */
9338
9339         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9340
9341         return rc;
9342 }
9343
9344 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9345                                    u32 cmd_flags)
9346 {
9347         int count, i, rc;
9348
9349         /* build the command word */
9350         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9351
9352         /* need to clear DONE bit separately */
9353         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9354
9355         /* write the data */
9356         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9357
9358         /* address of the NVRAM to write to */
9359         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9360                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9361
9362         /* issue the write command */
9363         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9364
9365         /* adjust timeout for emulation/FPGA */
9366         count = NVRAM_TIMEOUT_COUNT;
9367         if (CHIP_REV_IS_SLOW(bp))
9368                 count *= 100;
9369
9370         /* wait for completion */
9371         rc = -EBUSY;
9372         for (i = 0; i < count; i++) {
9373                 udelay(5);
9374                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9375                 if (val & MCPR_NVM_COMMAND_DONE) {
9376                         rc = 0;
9377                         break;
9378                 }
9379         }
9380
9381         return rc;
9382 }
9383
9384 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9385
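/*
 * BYTE_OFFSET() maps a byte address to the bit position of that byte
 * within its containing dword:
 *
 *      offset % 4 == 0  ->  shift  0
 *      offset % 4 == 1  ->  shift  8
 *      offset % 4 == 2  ->  shift 16
 *      offset % 4 == 3  ->  shift 24
 */
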
9386 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9387                               int buf_size)
9388 {
9389         int rc;
9390         u32 cmd_flags;
9391         u32 align_offset;
9392         __be32 val;
9393
9394         if (offset + buf_size > bp->common.flash_size) {
9395                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9396                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9397                    offset, buf_size, bp->common.flash_size);
9398                 return -EINVAL;
9399         }
9400
9401         /* request access to nvram interface */
9402         rc = bnx2x_acquire_nvram_lock(bp);
9403         if (rc)
9404                 return rc;
9405
9406         /* enable access to nvram interface */
9407         bnx2x_enable_nvram_access(bp);
9408
9409         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9410         align_offset = (offset & ~0x03);
9411         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9412
9413         if (rc == 0) {
9414                 val &= ~(0xff << BYTE_OFFSET(offset));
9415                 val |= (*data_buf << BYTE_OFFSET(offset));
9416
9417                 /* NVRAM data is returned as an array of bytes;
9418                  * convert it back to CPU order */
9419                 val = be32_to_cpu(val);
9420
9421                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9422                                              cmd_flags);
9423         }
9424
9425         /* disable access to nvram interface */
9426         bnx2x_disable_nvram_access(bp);
9427         bnx2x_release_nvram_lock(bp);
9428
9429         return rc;
9430 }
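
#if 0
/* A minimal sketch (not compiled) of the read-modify-write that
 * bnx2x_nvram_write1() above performs on the containing dword; the
 * function and parameter names here are hypothetical */
static u32 bnx2x_nvram_rmw_byte(u32 dword, u32 offset, u8 byte)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));
	dword |= ((u32)byte << BYTE_OFFSET(offset));
	return dword;
}
#endif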
9431
9432 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9433                              int buf_size)
9434 {
9435         int rc;
9436         u32 cmd_flags;
9437         u32 val;
9438         u32 written_so_far;
9439
9440         if (buf_size == 1)      /* ethtool */
9441                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9442
9443         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9444                 DP(BNX2X_MSG_NVM,
9445                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9446                    offset, buf_size);
9447                 return -EINVAL;
9448         }
9449
9450         if (offset + buf_size > bp->common.flash_size) {
9451                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9452                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9453                    offset, buf_size, bp->common.flash_size);
9454                 return -EINVAL;
9455         }
9456
9457         /* request access to nvram interface */
9458         rc = bnx2x_acquire_nvram_lock(bp);
9459         if (rc)
9460                 return rc;
9461
9462         /* enable access to nvram interface */
9463         bnx2x_enable_nvram_access(bp);
9464
9465         written_so_far = 0;
9466         cmd_flags = MCPR_NVM_COMMAND_FIRST;
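        /* each NVRAM page is written as its own FIRST...LAST command
         * sequence: LAST is raised on the dword that ends a page (or the
         * buffer) and FIRST is raised again on the dword that starts the
         * next page */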
9467         while ((written_so_far < buf_size) && (rc == 0)) {
9468                 if (written_so_far == (buf_size - sizeof(u32)))
9469                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9470                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9471                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9472                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9473                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9474
9475                 memcpy(&val, data_buf, 4);
9476
9477                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9478
9479                 /* advance to the next dword */
9480                 offset += sizeof(u32);
9481                 data_buf += sizeof(u32);
9482                 written_so_far += sizeof(u32);
9483                 cmd_flags = 0;
9484         }
9485
9486         /* disable access to nvram interface */
9487         bnx2x_disable_nvram_access(bp);
9488         bnx2x_release_nvram_lock(bp);
9489
9490         return rc;
9491 }
9492
9493 static int bnx2x_set_eeprom(struct net_device *dev,
9494                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9495 {
9496         struct bnx2x *bp = netdev_priv(dev);
9497         int port = BP_PORT(bp);
9498         int rc = 0;
9499
9500         if (!netif_running(dev))
9501                 return -EAGAIN;
9502
9503         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9504            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9505            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9506            eeprom->len, eeprom->len);
9507
9508         /* parameters already validated in ethtool_set_eeprom */
9509
9510         /* PHY eeprom can be accessed only by the PMF */
9511         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9512             !bp->port.pmf)
9513                 return -EINVAL;
9514
9515         if (eeprom->magic == 0x50485950) {
9516                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9517                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9518
9519                 bnx2x_acquire_phy_lock(bp);
9520                 rc |= bnx2x_link_reset(&bp->link_params,
9521                                        &bp->link_vars, 0);
9522                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9523                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9524                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9525                                        MISC_REGISTERS_GPIO_HIGH, port);
9526                 bnx2x_release_phy_lock(bp);
9527                 bnx2x_link_report(bp);
9528
9529         } else if (eeprom->magic == 0x50485952) {
9530                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9531                 if ((bp->state == BNX2X_STATE_OPEN) ||
9532                     (bp->state == BNX2X_STATE_DISABLED)) {
9533                         bnx2x_acquire_phy_lock(bp);
9534                         rc |= bnx2x_link_reset(&bp->link_params,
9535                                                &bp->link_vars, 1);
9536
9537                         rc |= bnx2x_phy_init(&bp->link_params,
9538                                              &bp->link_vars);
9539                         bnx2x_release_phy_lock(bp);
9540                         bnx2x_calc_fc_adv(bp);
9541                 }
9542         } else if (eeprom->magic == 0x53985943) {
9543                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9544                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9545                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9546                         u8 ext_phy_addr =
9547                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9548
9549                         /* DSP Remove Download Mode */
9550                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9551                                        MISC_REGISTERS_GPIO_LOW, port);
9552
9553                         bnx2x_acquire_phy_lock(bp);
9554
9555                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9556
9557                         /* wait 0.5 sec to allow it to run */
9558                         msleep(500);
9559                         bnx2x_ext_phy_hw_reset(bp, port);
9560                         msleep(500);
9561                         bnx2x_release_phy_lock(bp);
9562                 }
9563         } else
9564                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9565
9566         return rc;
9567 }
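
/* Illustrative only: a plausible PHY firmware upgrade sequence driving the
 * magic values handled above (device name hypothetical):
 *
 *	ethtool -E eth0 magic 0x50485950 ...	'PHYP': park the link
 *	ethtool -E eth0 magic <dev magic> ...	program the PHY flash
 *	ethtool -E eth0 magic 0x53985943 ...	'PHYC': reset the SFX7101
 *	ethtool -E eth0 magic 0x50485952 ...	'PHYR': re-init the link
 */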
9568
9569 static int bnx2x_get_coalesce(struct net_device *dev,
9570                               struct ethtool_coalesce *coal)
9571 {
9572         struct bnx2x *bp = netdev_priv(dev);
9573
9574         memset(coal, 0, sizeof(struct ethtool_coalesce));
9575
9576         coal->rx_coalesce_usecs = bp->rx_ticks;
9577         coal->tx_coalesce_usecs = bp->tx_ticks;
9578
9579         return 0;
9580 }
9581
9582 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9583 static int bnx2x_set_coalesce(struct net_device *dev,
9584                               struct ethtool_coalesce *coal)
9585 {
9586         struct bnx2x *bp = netdev_priv(dev);
9587
9588         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9589         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9590                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9591
9592         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9593         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9594                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9595
9596         if (netif_running(dev))
9597                 bnx2x_update_coalesce(bp);
9598
9599         return 0;
9600 }
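
/* e.g. "ethtool -C eth0 rx-usecs 25 tx-usecs 50" lands in
 * bnx2x_set_coalesce() above; values above BNX2X_MAX_COALES_TOUT
 * (2880us) are silently clamped */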
9601
9602 static void bnx2x_get_ringparam(struct net_device *dev,
9603                                 struct ethtool_ringparam *ering)
9604 {
9605         struct bnx2x *bp = netdev_priv(dev);
9606
9607         ering->rx_max_pending = MAX_RX_AVAIL;
9608         ering->rx_mini_max_pending = 0;
9609         ering->rx_jumbo_max_pending = 0;
9610
9611         ering->rx_pending = bp->rx_ring_size;
9612         ering->rx_mini_pending = 0;
9613         ering->rx_jumbo_pending = 0;
9614
9615         ering->tx_max_pending = MAX_TX_AVAIL;
9616         ering->tx_pending = bp->tx_ring_size;
9617 }
9618
9619 static int bnx2x_set_ringparam(struct net_device *dev,
9620                                struct ethtool_ringparam *ering)
9621 {
9622         struct bnx2x *bp = netdev_priv(dev);
9623         int rc = 0;
9624
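        /* the Tx ring must be able to hold one maximally fragmented skb
         * (MAX_SKB_FRAGS fragments) plus the start/parsing BDs and some
         * headroom, hence the 'MAX_SKB_FRAGS + 4' lower bound below */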
9625         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9626             (ering->tx_pending > MAX_TX_AVAIL) ||
9627             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9628                 return -EINVAL;
9629
9630         bp->rx_ring_size = ering->rx_pending;
9631         bp->tx_ring_size = ering->tx_pending;
9632
9633         if (netif_running(dev)) {
9634                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9635                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9636         }
9637
9638         return rc;
9639 }
9640
9641 static void bnx2x_get_pauseparam(struct net_device *dev,
9642                                  struct ethtool_pauseparam *epause)
9643 {
9644         struct bnx2x *bp = netdev_priv(dev);
9645
9646         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9647                            BNX2X_FLOW_CTRL_AUTO) &&
9648                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9649
9650         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9651                             BNX2X_FLOW_CTRL_RX);
9652         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9653                             BNX2X_FLOW_CTRL_TX);
9654
9655         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9656            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9657            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9658 }
9659
9660 static int bnx2x_set_pauseparam(struct net_device *dev,
9661                                 struct ethtool_pauseparam *epause)
9662 {
9663         struct bnx2x *bp = netdev_priv(dev);
9664
9665         if (IS_E1HMF(bp))
9666                 return 0;
9667
9668         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9669            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9670            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9671
9672         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9673
9674         if (epause->rx_pause)
9675                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9676
9677         if (epause->tx_pause)
9678                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9679
9680         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9681                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9682
9683         if (epause->autoneg) {
9684                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9685                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9686                         return -EINVAL;
9687                 }
9688
9689                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9690                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9691         }
9692
9693         DP(NETIF_MSG_LINK,
9694            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9695
9696         if (netif_running(dev)) {
9697                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9698                 bnx2x_link_set(bp);
9699         }
9700
9701         return 0;
9702 }
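
/* e.g. "ethtool -A eth0 autoneg on rx on tx on"; in E1H multi-function
 * mode the request is silently accepted above but not applied */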
9703
9704 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9705 {
9706         struct bnx2x *bp = netdev_priv(dev);
9707         int changed = 0;
9708         int rc = 0;
9709
9710         /* TPA requires Rx CSUM offloading */
9711         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9712                 if (!(dev->features & NETIF_F_LRO)) {
9713                         dev->features |= NETIF_F_LRO;
9714                         bp->flags |= TPA_ENABLE_FLAG;
9715                         changed = 1;
9716                 }
9717
9718         } else if (dev->features & NETIF_F_LRO) {
9719                 dev->features &= ~NETIF_F_LRO;
9720                 bp->flags &= ~TPA_ENABLE_FLAG;
9721                 changed = 1;
9722         }
9723
9724         if (changed && netif_running(dev)) {
9725                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9726                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9727         }
9728
9729         return rc;
9730 }
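
/* with the ethtool of this era, "ethtool -K eth0 lro on" reaches
 * bnx2x_set_flags() above through ETH_FLAG_LRO; the flag only sticks
 * while Rx checksumming is enabled */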
9731
9732 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9733 {
9734         struct bnx2x *bp = netdev_priv(dev);
9735
9736         return bp->rx_csum;
9737 }
9738
9739 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9740 {
9741         struct bnx2x *bp = netdev_priv(dev);
9742         int rc = 0;
9743
9744         bp->rx_csum = data;
9745
9746         /* Disable TPA when Rx CSUM is disabled; otherwise all
9747            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9748         if (!data) {
9749                 u32 flags = ethtool_op_get_flags(dev);
9750
9751                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9752         }
9753
9754         return rc;
9755 }
9756
9757 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9758 {
9759         if (data) {
9760                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9761                 dev->features |= NETIF_F_TSO6;
9762         } else {
9763                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9764                 dev->features &= ~NETIF_F_TSO6;
9765         }
9766
9767         return 0;
9768 }
9769
9770 static const struct {
9771         char string[ETH_GSTRING_LEN];
9772 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9773         { "register_test (offline)" },
9774         { "memory_test (offline)" },
9775         { "loopback_test (offline)" },
9776         { "nvram_test (online)" },
9777         { "interrupt_test (online)" },
9778         { "link_test (online)" },
9779         { "idle check (online)" }
9780 };
9781
9782 static int bnx2x_self_test_count(struct net_device *dev)
9783 {
9784         return BNX2X_NUM_TESTS;
9785 }
9786
9787 static int bnx2x_test_registers(struct bnx2x *bp)
9788 {
9789         int idx, i, rc = -ENODEV;
9790         u32 wr_val = 0;
9791         int port = BP_PORT(bp);
9792         static const struct {
9793                 u32  offset0;
9794                 u32  offset1;
9795                 u32  mask;
9796         } reg_tbl[] = {
9797 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9798                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9799                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9800                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9801                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9802                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9803                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9804                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9805                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9806                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9807 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9808                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9809                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9810                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9811                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9812                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9813                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9814                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9815                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9816                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9817 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9818                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9819                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9820                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9821                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9822                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9823                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9824                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9825                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9826                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9827 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9828                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9829                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9830                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9831                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9832                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9833                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9834
9835                 { 0xffffffff, 0, 0x00000000 }
9836         };
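
        /* offset1 is the per-port register stride: the address actually
         * exercised below is offset0 + port * offset1 */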
9837
9838         if (!netif_running(bp->dev))
9839                 return rc;
9840
9841         /* Run the test twice:
9842            first by writing 0x00000000, then by writing 0xffffffff */
9843         for (idx = 0; idx < 2; idx++) {
9844
9845                 switch (idx) {
9846                 case 0:
9847                         wr_val = 0;
9848                         break;
9849                 case 1:
9850                         wr_val = 0xffffffff;
9851                         break;
9852                 }
9853
9854                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9855                         u32 offset, mask, save_val, val;
9856
9857                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9858                         mask = reg_tbl[i].mask;
9859
9860                         save_val = REG_RD(bp, offset);
9861
9862                         REG_WR(bp, offset, wr_val);
9863                         val = REG_RD(bp, offset);
9864
9865                         /* Restore the original register's value */
9866                         REG_WR(bp, offset, save_val);
9867
9868                         /* verify that the value read back is as expected */
9869                         if ((val & mask) != (wr_val & mask))
9870                                 goto test_reg_exit;
9871                 }
9872         }
9873
9874         rc = 0;
9875
9876 test_reg_exit:
9877         return rc;
9878 }
9879
9880 static int bnx2x_test_memory(struct bnx2x *bp)
9881 {
9882         int i, j, rc = -ENODEV;
9883         u32 val;
9884         static const struct {
9885                 u32 offset;
9886                 int size;
9887         } mem_tbl[] = {
9888                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9889                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9890                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9891                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9892                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9893                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9894                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9895
9896                 { 0xffffffff, 0 }
9897         };
9898         static const struct {
9899                 char *name;
9900                 u32 offset;
9901                 u32 e1_mask;
9902                 u32 e1h_mask;
9903         } prty_tbl[] = {
9904                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9905                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9906                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9907                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9908                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9909                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9910
9911                 { NULL, 0xffffffff, 0, 0 }
9912         };
9913
9914         if (!netif_running(bp->dev))
9915                 return rc;
9916
9917         /* Go through all the memories */
9918         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9919                 for (j = 0; j < mem_tbl[i].size; j++)
9920                         REG_RD(bp, mem_tbl[i].offset + j*4);
9921
9922         /* Check the parity status */
9923         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9924                 val = REG_RD(bp, prty_tbl[i].offset);
9925                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9926                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9927                         DP(NETIF_MSG_HW,
9928                            "%s is 0x%x\n", prty_tbl[i].name, val);
9929                         goto test_mem_exit;
9930                 }
9931         }
9932
9933         rc = 0;
9934
9935 test_mem_exit:
9936         return rc;
9937 }
9938
9939 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9940 {
9941         int cnt = 1000;
9942
9943         if (link_up)
9944                 while (bnx2x_link_test(bp) && cnt--)
9945                         msleep(10);
9946 }
9947
9948 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9949 {
9950         unsigned int pkt_size, num_pkts, i;
9951         struct sk_buff *skb;
9952         unsigned char *packet;
9953         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9954         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9955         u16 tx_start_idx, tx_idx;
9956         u16 rx_start_idx, rx_idx;
9957         u16 pkt_prod, bd_prod;
9958         struct sw_tx_bd *tx_buf;
9959         struct eth_tx_start_bd *tx_start_bd;
9960         struct eth_tx_parse_bd *pbd = NULL;
9961         dma_addr_t mapping;
9962         union eth_rx_cqe *cqe;
9963         u8 cqe_fp_flags;
9964         struct sw_rx_bd *rx_buf;
9965         u16 len;
9966         int rc = -ENODEV;
9967
9968         /* check the loopback mode */
9969         switch (loopback_mode) {
9970         case BNX2X_PHY_LOOPBACK:
9971                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9972                         return -EINVAL;
9973                 break;
9974         case BNX2X_MAC_LOOPBACK:
9975                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9976                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9977                 break;
9978         default:
9979                 return -EINVAL;
9980         }
9981
9982         /* prepare the loopback packet */
9983         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9984                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9985         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9986         if (!skb) {
9987                 rc = -ENOMEM;
9988                 goto test_loopback_exit;
9989         }
9990         packet = skb_put(skb, pkt_size);
9991         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9992         memset(packet + ETH_ALEN, 0, ETH_ALEN);
9993         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9994         for (i = ETH_HLEN; i < pkt_size; i++)
9995                 packet[i] = (unsigned char) (i & 0xff);
9996
9997         /* send the loopback packet */
9998         num_pkts = 0;
9999         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10000         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10001
10002         pkt_prod = fp_tx->tx_pkt_prod++;
10003         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10004         tx_buf->first_bd = fp_tx->tx_bd_prod;
10005         tx_buf->skb = skb;
10006         tx_buf->flags = 0;
10007
10008         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10009         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10010         mapping = pci_map_single(bp->pdev, skb->data,
10011                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10012         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10013         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10014         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10015         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10016         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10017         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10018         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10019                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10020
10021         /* turn on parsing and get a BD */
10022         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10023         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10024
10025         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10026
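        /* make sure both BDs are fully written before the producer update
         * below becomes visible to the chip */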
10027         wmb();
10028
10029         fp_tx->tx_db.data.prod += 2;
10030         barrier();
10031         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10032
10033         mmiowb();
10034
10035         num_pkts++;
10036         fp_tx->tx_bd_prod += 2; /* start + pbd */
10037         bp->dev->trans_start = jiffies;
10038
10039         udelay(100);
10040
10041         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10042         if (tx_idx != tx_start_idx + num_pkts)
10043                 goto test_loopback_exit;
10044
10045         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10046         if (rx_idx != rx_start_idx + num_pkts)
10047                 goto test_loopback_exit;
10048
10049         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10050         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10051         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10052                 goto test_loopback_rx_exit;
10053
10054         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10055         if (len != pkt_size)
10056                 goto test_loopback_rx_exit;
10057
10058         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10059         skb = rx_buf->skb;
10060         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10061         for (i = ETH_HLEN; i < pkt_size; i++)
10062                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10063                         goto test_loopback_rx_exit;
10064
10065         rc = 0;
10066
10067 test_loopback_rx_exit:
10068
10069         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10070         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10071         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10072         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10073
10074         /* Update producers */
10075         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10076                              fp_rx->rx_sge_prod);
10077
10078 test_loopback_exit:
10079         bp->link_params.loopback_mode = LOOPBACK_NONE;
10080
10081         return rc;
10082 }
10083
10084 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10085 {
10086         int rc = 0, res;
10087
10088         if (!netif_running(bp->dev))
10089                 return BNX2X_LOOPBACK_FAILED;
10090
10091         bnx2x_netif_stop(bp, 1);
10092         bnx2x_acquire_phy_lock(bp);
10093
10094         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10095         if (res) {
10096                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10097                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10098         }
10099
10100         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10101         if (res) {
10102                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10103                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10104         }
10105
10106         bnx2x_release_phy_lock(bp);
10107         bnx2x_netif_start(bp);
10108
10109         return rc;
10110 }
10111
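/* each NVRAM region ends with the inverted CRC32 of its contents, so
 * running ether_crc_le() over the whole region, trailing CRC included,
 * yields this fixed residual when the region is intact */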
10112 #define CRC32_RESIDUAL                  0xdebb20e3
10113
10114 static int bnx2x_test_nvram(struct bnx2x *bp)
10115 {
10116         static const struct {
10117                 int offset;
10118                 int size;
10119         } nvram_tbl[] = {
10120                 {     0,  0x14 }, /* bootstrap */
10121                 {  0x14,  0xec }, /* dir */
10122                 { 0x100, 0x350 }, /* manuf_info */
10123                 { 0x450,  0xf0 }, /* feature_info */
10124                 { 0x640,  0x64 }, /* upgrade_key_info */
10125                 { 0x6a4,  0x64 },
10126                 { 0x708,  0x70 }, /* manuf_key_info */
10127                 { 0x778,  0x70 },
10128                 {     0,     0 }
10129         };
10130         __be32 buf[0x350 / 4];
10131         u8 *data = (u8 *)buf;
10132         int i, rc;
10133         u32 magic, csum;
10134
10135         rc = bnx2x_nvram_read(bp, 0, data, 4);
10136         if (rc) {
10137                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10138                 goto test_nvram_exit;
10139         }
10140
10141         magic = be32_to_cpu(buf[0]);
10142         if (magic != 0x669955aa) {
10143                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10144                 rc = -ENODEV;
10145                 goto test_nvram_exit;
10146         }
10147
10148         for (i = 0; nvram_tbl[i].size; i++) {
10149
10150                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10151                                       nvram_tbl[i].size);
10152                 if (rc) {
10153                         DP(NETIF_MSG_PROBE,
10154                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10155                         goto test_nvram_exit;
10156                 }
10157
10158                 csum = ether_crc_le(nvram_tbl[i].size, data);
10159                 if (csum != CRC32_RESIDUAL) {
10160                         DP(NETIF_MSG_PROBE,
10161                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10162                         rc = -ENODEV;
10163                         goto test_nvram_exit;
10164                 }
10165         }
10166
10167 test_nvram_exit:
10168         return rc;
10169 }
10170
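/* the interrupt test below posts a benign SET_MAC ramrod on the slowpath
 * and waits for its completion event: if no interrupt is delivered,
 * set_mac_pending never clears and the test fails */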
10171 static int bnx2x_test_intr(struct bnx2x *bp)
10172 {
10173         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10174         int i, rc;
10175
10176         if (!netif_running(bp->dev))
10177                 return -ENODEV;
10178
10179         config->hdr.length = 0;
10180         if (CHIP_IS_E1(bp))
10181                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10182         else
10183                 config->hdr.offset = BP_FUNC(bp);
10184         config->hdr.client_id = bp->fp->cl_id;
10185         config->hdr.reserved1 = 0;
10186
10187         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10188                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10189                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10190         if (rc == 0) {
10191                 bp->set_mac_pending++;
10192                 for (i = 0; i < 10; i++) {
10193                         if (!bp->set_mac_pending)
10194                                 break;
10195                         msleep_interruptible(10);
10196                 }
10197                 if (i == 10)
10198                         rc = -ENODEV;
10199         }
10200
10201         return rc;
10202 }
10203
10204 static void bnx2x_self_test(struct net_device *dev,
10205                             struct ethtool_test *etest, u64 *buf)
10206 {
10207         struct bnx2x *bp = netdev_priv(dev);
10208
10209         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10210
10211         if (!netif_running(dev))
10212                 return;
10213
10214         /* offline tests are not supported in MF mode */
10215         if (IS_E1HMF(bp))
10216                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10217
10218         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10219                 int port = BP_PORT(bp);
10220                 u32 val;
10221                 u8 link_up;
10222
10223                 /* save current value of input enable for TX port IF */
10224                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10225                 /* disable input for TX port IF */
10226                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10227
10228                 link_up = bp->link_vars.link_up;
10229                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10230                 bnx2x_nic_load(bp, LOAD_DIAG);
10231                 /* wait until link state is restored */
10232                 bnx2x_wait_for_link(bp, link_up);
10233
10234                 if (bnx2x_test_registers(bp) != 0) {
10235                         buf[0] = 1;
10236                         etest->flags |= ETH_TEST_FL_FAILED;
10237                 }
10238                 if (bnx2x_test_memory(bp) != 0) {
10239                         buf[1] = 1;
10240                         etest->flags |= ETH_TEST_FL_FAILED;
10241                 }
10242                 buf[2] = bnx2x_test_loopback(bp, link_up);
10243                 if (buf[2] != 0)
10244                         etest->flags |= ETH_TEST_FL_FAILED;
10245
10246                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10247
10248                 /* restore input for TX port IF */
10249                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10250
10251                 bnx2x_nic_load(bp, LOAD_NORMAL);
10252                 /* wait until link state is restored */
10253                 bnx2x_wait_for_link(bp, link_up);
10254         }
10255         if (bnx2x_test_nvram(bp) != 0) {
10256                 buf[3] = 1;
10257                 etest->flags |= ETH_TEST_FL_FAILED;
10258         }
10259         if (bnx2x_test_intr(bp) != 0) {
10260                 buf[4] = 1;
10261                 etest->flags |= ETH_TEST_FL_FAILED;
10262         }
10263         if (bp->port.pmf)
10264                 if (bnx2x_link_test(bp) != 0) {
10265                         buf[5] = 1;
10266                         etest->flags |= ETH_TEST_FL_FAILED;
10267                 }
10268
10269 #ifdef BNX2X_EXTRA_DEBUG
10270         bnx2x_panic_dump(bp);
10271 #endif
10272 }
10273
10274 static const struct {
10275         long offset;
10276         int size;
10277         u8 string[ETH_GSTRING_LEN];
10278 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10279 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10280         { Q_STATS_OFFSET32(error_bytes_received_hi),
10281                                                 8, "[%d]: rx_error_bytes" },
10282         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10283                                                 8, "[%d]: rx_ucast_packets" },
10284         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10285                                                 8, "[%d]: rx_mcast_packets" },
10286         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10287                                                 8, "[%d]: rx_bcast_packets" },
10288         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10289         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10290                                          4, "[%d]: rx_phy_ip_err_discards"},
10291         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10292                                          4, "[%d]: rx_skb_alloc_discard" },
10293         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10294
10295 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10296         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10297                                                         8, "[%d]: tx_packets" }
10298 };
10299
10300 static const struct {
10301         long offset;
10302         int size;
10303         u32 flags;
10304 #define STATS_FLAGS_PORT                1
10305 #define STATS_FLAGS_FUNC                2
10306 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10307         u8 string[ETH_GSTRING_LEN];
10308 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10309 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10310                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10311         { STATS_OFFSET32(error_bytes_received_hi),
10312                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10313         { STATS_OFFSET32(total_unicast_packets_received_hi),
10314                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10315         { STATS_OFFSET32(total_multicast_packets_received_hi),
10316                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10317         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10318                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10319         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10320                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10321         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10322                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10323         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10324                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10325         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10326                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10327 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10328                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10329         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10330                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10331         { STATS_OFFSET32(no_buff_discard_hi),
10332                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10333         { STATS_OFFSET32(mac_filter_discard),
10334                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10335         { STATS_OFFSET32(xxoverflow_discard),
10336                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10337         { STATS_OFFSET32(brb_drop_hi),
10338                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10339         { STATS_OFFSET32(brb_truncate_hi),
10340                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10341         { STATS_OFFSET32(pause_frames_received_hi),
10342                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10343         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10344                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10345         { STATS_OFFSET32(nig_timer_max),
10346                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10347 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10348                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10349         { STATS_OFFSET32(rx_skb_alloc_failed),
10350                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10351         { STATS_OFFSET32(hw_csum_err),
10352                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10353
10354         { STATS_OFFSET32(total_bytes_transmitted_hi),
10355                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10356         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10357                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10358         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10359                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10360         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10361                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10362         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10363                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10364         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10365                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10366         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10367                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10368 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10369                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10370         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10371                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10372         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10373                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10374         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10375                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10376         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10377                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10378         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10379                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10380         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10381                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10382         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10383                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10384         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10385                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10386         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10387                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10388 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10389                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10390         { STATS_OFFSET32(pause_frames_sent_hi),
10391                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10392 };
10393
10394 #define IS_PORT_STAT(i) \
10395         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10396 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10397 #define IS_E1HMF_MODE_STAT(bp) \
10398                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10399
10400 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10401 {
10402         struct bnx2x *bp = netdev_priv(dev);
10403         int i, j, k;
10404
10405         switch (stringset) {
10406         case ETH_SS_STATS:
10407                 if (is_multi(bp)) {
10408                         k = 0;
10409                         for_each_rx_queue(bp, i) {
10410                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10411                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10412                                                 bnx2x_q_stats_arr[j].string, i);
10413                                 k += BNX2X_NUM_Q_STATS;
10414                         }
10415                         if (IS_E1HMF_MODE_STAT(bp))
10416                                 break;
10417                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10418                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10419                                        bnx2x_stats_arr[j].string);
10420                 } else {
10421                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10422                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10423                                         continue;
10424                                 strcpy(buf + j*ETH_GSTRING_LEN,
10425                                        bnx2x_stats_arr[i].string);
10426                                 j++;
10427                         }
10428                 }
10429                 break;
10430
10431         case ETH_SS_TEST:
10432                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10433                 break;
10434         }
10435 }
10436
10437 static int bnx2x_get_stats_count(struct net_device *dev)
10438 {
10439         struct bnx2x *bp = netdev_priv(dev);
10440         int i, num_stats;
10441
10442         if (is_multi(bp)) {
10443                 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10444                 if (!IS_E1HMF_MODE_STAT(bp))
10445                         num_stats += BNX2X_NUM_STATS;
10446         } else {
10447                 if (IS_E1HMF_MODE_STAT(bp)) {
10448                         num_stats = 0;
10449                         for (i = 0; i < BNX2X_NUM_STATS; i++)
10450                                 if (IS_FUNC_STAT(i))
10451                                         num_stats++;
10452                 } else
10453                         num_stats = BNX2X_NUM_STATS;
10454         }
10455
10456         return num_stats;
10457 }
10458
10459 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10460                                     struct ethtool_stats *stats, u64 *buf)
10461 {
10462         struct bnx2x *bp = netdev_priv(dev);
10463         u32 *hw_stats, *offset;
10464         int i, j, k;
10465
10466         if (is_multi(bp)) {
10467                 k = 0;
10468                 for_each_rx_queue(bp, i) {
10469                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10470                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10471                                 if (bnx2x_q_stats_arr[j].size == 0) {
10472                                         /* skip this counter */
10473                                         buf[k + j] = 0;
10474                                         continue;
10475                                 }
10476                                 offset = (hw_stats +
10477                                           bnx2x_q_stats_arr[j].offset);
10478                                 if (bnx2x_q_stats_arr[j].size == 4) {
10479                                         /* 4-byte counter */
10480                                         buf[k + j] = (u64) *offset;
10481                                         continue;
10482                                 }
10483                                 /* 8-byte counter */
10484                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10485                         }
10486                         k += BNX2X_NUM_Q_STATS;
10487                 }
10488                 if (IS_E1HMF_MODE_STAT(bp))
10489                         return;
10490                 hw_stats = (u32 *)&bp->eth_stats;
10491                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10492                         if (bnx2x_stats_arr[j].size == 0) {
10493                                 /* skip this counter */
10494                                 buf[k + j] = 0;
10495                                 continue;
10496                         }
10497                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10498                         if (bnx2x_stats_arr[j].size == 4) {
10499                                 /* 4-byte counter */
10500                                 buf[k + j] = (u64) *offset;
10501                                 continue;
10502                         }
10503                         /* 8-byte counter */
10504                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10505                 }
10506         } else {
10507                 hw_stats = (u32 *)&bp->eth_stats;
10508                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10509                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10510                                 continue;
10511                         if (bnx2x_stats_arr[i].size == 0) {
10512                                 /* skip this counter */
10513                                 buf[j] = 0;
10514                                 j++;
10515                                 continue;
10516                         }
10517                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10518                         if (bnx2x_stats_arr[i].size == 4) {
10519                                 /* 4-byte counter */
10520                                 buf[j] = (u64) *offset;
10521                                 j++;
10522                                 continue;
10523                         }
10524                         /* 8-byte counter */
10525                         buf[j] = HILO_U64(*offset, *(offset + 1));
10526                         j++;
10527                 }
10528         }
10529 }
10530
10531 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10532 {
10533         struct bnx2x *bp = netdev_priv(dev);
10534         int port = BP_PORT(bp);
10535         int i;
10536
10537         if (!netif_running(dev))
10538                 return 0;
10539
10540         if (!bp->port.pmf)
10541                 return 0;
10542
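        /* 'data' is the requested blink time in seconds (the loop below
         * toggles the LED every 500ms); treat 0 as a 2 second default */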
10543         if (data == 0)
10544                 data = 2;
10545
10546         for (i = 0; i < (data * 2); i++) {
10547                 if ((i % 2) == 0)
10548                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10549                                       bp->link_params.hw_led_mode,
10550                                       bp->link_params.chip_id);
10551                 else
10552                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10553                                       bp->link_params.hw_led_mode,
10554                                       bp->link_params.chip_id);
10555
10556                 msleep_interruptible(500);
10557                 if (signal_pending(current))
10558                         break;
10559         }
10560
10561         if (bp->link_vars.link_up)
10562                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10563                               bp->link_vars.line_speed,
10564                               bp->link_params.hw_led_mode,
10565                               bp->link_params.chip_id);
10566
10567         return 0;
10568 }
10569
10570 static struct ethtool_ops bnx2x_ethtool_ops = {
10571         .get_settings           = bnx2x_get_settings,
10572         .set_settings           = bnx2x_set_settings,
10573         .get_drvinfo            = bnx2x_get_drvinfo,
10574         .get_regs_len           = bnx2x_get_regs_len,
10575         .get_regs               = bnx2x_get_regs,
10576         .get_wol                = bnx2x_get_wol,
10577         .set_wol                = bnx2x_set_wol,
10578         .get_msglevel           = bnx2x_get_msglevel,
10579         .set_msglevel           = bnx2x_set_msglevel,
10580         .nway_reset             = bnx2x_nway_reset,
10581         .get_link               = bnx2x_get_link,
10582         .get_eeprom_len         = bnx2x_get_eeprom_len,
10583         .get_eeprom             = bnx2x_get_eeprom,
10584         .set_eeprom             = bnx2x_set_eeprom,
10585         .get_coalesce           = bnx2x_get_coalesce,
10586         .set_coalesce           = bnx2x_set_coalesce,
10587         .get_ringparam          = bnx2x_get_ringparam,
10588         .set_ringparam          = bnx2x_set_ringparam,
10589         .get_pauseparam         = bnx2x_get_pauseparam,
10590         .set_pauseparam         = bnx2x_set_pauseparam,
10591         .get_rx_csum            = bnx2x_get_rx_csum,
10592         .set_rx_csum            = bnx2x_set_rx_csum,
10593         .get_tx_csum            = ethtool_op_get_tx_csum,
10594         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10595         .set_flags              = bnx2x_set_flags,
10596         .get_flags              = ethtool_op_get_flags,
10597         .get_sg                 = ethtool_op_get_sg,
10598         .set_sg                 = ethtool_op_set_sg,
10599         .get_tso                = ethtool_op_get_tso,
10600         .set_tso                = bnx2x_set_tso,
10601         .self_test_count        = bnx2x_self_test_count,
10602         .self_test              = bnx2x_self_test,
10603         .get_strings            = bnx2x_get_strings,
10604         .phys_id                = bnx2x_phys_id,
10605         .get_stats_count        = bnx2x_get_stats_count,
10606         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10607 };
10608
10609 /* end of ethtool_ops */
10610
10611 /****************************************************************************
10612 * General service functions
10613 ****************************************************************************/
10614
10615 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10616 {
10617         u16 pmcsr;
10618
10619         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10620
10621         switch (state) {
10622         case PCI_D0:
10623                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10624                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10625                                        PCI_PM_CTRL_PME_STATUS));
10626
10627                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10628                         /* delay required during transition out of D3hot */
10629                         msleep(20);
10630                 break;
10631
10632         case PCI_D3hot:
10633                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
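                /* PCI PM power state field: 3 == D3hot */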
10634                 pmcsr |= 3;
10635
10636                 if (bp->wol)
10637                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10638
10639                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10640                                       pmcsr);
10641
10642                 /* No more memory access after this point until
10643                  * device is brought back to D0.
10644                  */
10645                 break;
10646
10647         default:
10648                 return -EINVAL;
10649         }
10650         return 0;
10651 }
10652
10653 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10654 {
10655         u16 rx_cons_sb;
10656
10657         /* Tell compiler that status block fields can change */
10658         barrier();
10659         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
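        /* the last slot of each RCQ page holds a "next page" pointer
         * rather than a completion, so step over it */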
10660         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10661                 rx_cons_sb++;
10662         return (fp->rx_comp_cons != rx_cons_sb);
10663 }
10664
10665 /*
10666  * net_device service functions
10667  */
10668
10669 static int bnx2x_poll(struct napi_struct *napi, int budget)
10670 {
10671         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10672                                                  napi);
10673         struct bnx2x *bp = fp->bp;
10674         int work_done = 0;
10675
10676 #ifdef BNX2X_STOP_ON_ERROR
10677         if (unlikely(bp->panic))
10678                 goto poll_panic;
10679 #endif
10680
10681         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10682         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10683
10684         bnx2x_update_fpsb_idx(fp);
10685
10686         if (bnx2x_has_rx_work(fp)) {
10687                 work_done = bnx2x_rx_int(fp, budget);
10688
10689                 /* must not complete if we consumed full budget */
10690                 if (work_done >= budget)
10691                         goto poll_again;
10692         }
10693
10694         /* bnx2x_has_rx_work() reads the status block, so we need to
10695          * ensure that the status block indices have actually been read
10696          * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work),
10697          * so that we don't write a "newer" status block value to the IGU.
10698          * Without the rmb, if a DMA arrived right after bnx2x_has_rx_work,
10699          * the memory read in bnx2x_update_fpsb_idx could be postponed
10700          * until just before bnx2x_ack_sb; in that case there would never
10701          * be another interrupt until the next status block update, even
10702          * though there is still unhandled work.
10703          */
10704         rmb();
10705
10706         if (!bnx2x_has_rx_work(fp)) {
10707 #ifdef BNX2X_STOP_ON_ERROR
10708 poll_panic:
10709 #endif
10710                 napi_complete(napi);
10711
10712                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10713                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10714                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10715                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10716         }
10717
10718 poll_again:
10719         return work_done;
10720 }
10721
10722
10723 /* We split the first BD into a headers BD and a data BD
10724  * to ease the pain of our fellow microcode engineers;
10725  * we use one mapping for both BDs.
10726  * So far this has only been observed to happen
10727  * in Other Operating Systems(TM)
10728  */
10729 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10730                                    struct bnx2x_fastpath *fp,
10731                                    struct sw_tx_bd *tx_buf,
10732                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10733                                    u16 bd_prod, int nbd)
10734 {
10735         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10736         struct eth_tx_bd *d_tx_bd;
10737         dma_addr_t mapping;
10738         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10739
10740         /* first fix first BD */
10741         h_tx_bd->nbd = cpu_to_le16(nbd);
10742         h_tx_bd->nbytes = cpu_to_le16(hlen);
10743
10744         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10745            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10746            h_tx_bd->addr_lo, h_tx_bd->nbd);
10747
10748         /* now get a new data BD
10749          * (after the pbd) and fill it */
10750         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10751         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10752
10753         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10754                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10755
10756         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10757         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10758         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10759
10760         /* this marks the BD as one that has no individual mapping */
10761         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10762
10763         DP(NETIF_MSG_TX_QUEUED,
10764            "TSO split data size is %d (%x:%x)\n",
10765            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10766
10767         /* update tx_bd */
10768         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10769
10770         return bd_prod;
10771 }
10772
10773 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10774 {
10775         if (fix > 0)
10776                 csum = (u16) ~csum_fold(csum_sub(csum,
10777                                 csum_partial(t_header - fix, fix, 0)));
10778
10779         else if (fix < 0)
10780                 csum = (u16) ~csum_fold(csum_add(csum,
10781                                 csum_partial(t_header, -fix, 0)));
10782
10783         return swab16(csum);
10784 }
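
/* Example of the fixup above (illustrative, assuming fix == 2): the partial
 * checksum of the two bytes preceding the transport header is folded out of
 * csum, so the returned (byte-swapped) value covers exactly the range the HW
 * expects; a negative fix folds the missing bytes back in instead.
 */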
10785
10786 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10787 {
10788         u32 rc;
10789
10790         if (skb->ip_summed != CHECKSUM_PARTIAL)
10791                 rc = XMIT_PLAIN;
10792
10793         else {
10794                 if (skb->protocol == htons(ETH_P_IPV6)) {
10795                         rc = XMIT_CSUM_V6;
10796                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10797                                 rc |= XMIT_CSUM_TCP;
10798
10799                 } else {
10800                         rc = XMIT_CSUM_V4;
10801                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10802                                 rc |= XMIT_CSUM_TCP;
10803                 }
10804         }
10805
10806         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10807                 rc |= XMIT_GSO_V4;
10808
10809         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10810                 rc |= XMIT_GSO_V6;
10811
10812         return rc;
10813 }
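
/* E.g. a CHECKSUM_PARTIAL IPv4/TCP skb with SKB_GSO_TCPV4 set yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4 (the flags combine as above).
 */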
10814
10815 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10816 /* check if the packet requires linearization (packet is too fragmented)
10817    no need to check fragmentation if page size > 8K (there will be no
10818    violation of FW restrictions) */
10819 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10820                              u32 xmit_type)
10821 {
10822         int to_copy = 0;
10823         int hlen = 0;
10824         int first_bd_sz = 0;
10825
10826         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10827         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10828
10829                 if (xmit_type & XMIT_GSO) {
10830                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10831                         /* Check if LSO packet needs to be copied:
10832                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10833                         int wnd_size = MAX_FETCH_BD - 3;
10834                         /* Number of windows to check */
10835                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10836                         int wnd_idx = 0;
10837                         int frag_idx = 0;
10838                         u32 wnd_sum = 0;
10839
10840                         /* Headers length */
10841                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10842                                 tcp_hdrlen(skb);
10843
10844                         /* Amount of data (w/o headers) on linear part of SKB */
10845                         first_bd_sz = skb_headlen(skb) - hlen;
10846
10847                         wnd_sum  = first_bd_sz;
10848
10849                         /* Calculate the first sum - it's special */
10850                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10851                                 wnd_sum +=
10852                                         skb_shinfo(skb)->frags[frag_idx].size;
10853
10854                         /* If there was data in the linear part of the skb - check it */
10855                         if (first_bd_sz > 0) {
10856                                 if (unlikely(wnd_sum < lso_mss)) {
10857                                         to_copy = 1;
10858                                         goto exit_lbl;
10859                                 }
10860
10861                                 wnd_sum -= first_bd_sz;
10862                         }
10863
10864                         /* Others are easier: run through the frag list and
10865                            check all windows */
10866                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10867                                 wnd_sum +=
10868                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10869
10870                                 if (unlikely(wnd_sum < lso_mss)) {
10871                                         to_copy = 1;
10872                                         break;
10873                                 }
10874                                 wnd_sum -=
10875                                         skb_shinfo(skb)->frags[wnd_idx].size;
10876                         }
10877                 } else {
10878                         /* in the non-LSO case a packet this fragmented
10879                            must always be linearized */
10880                         to_copy = 1;
10881                 }
10882         }
10883
10884 exit_lbl:
10885         if (unlikely(to_copy))
10886                 DP(NETIF_MSG_TX_QUEUED,
10887                    "Linearization IS REQUIRED for %s packet. "
10888                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10889                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10890                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10891
10892         return to_copy;
10893 }
10894 #endif
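
/* The loop above enforces a sliding-window rule: every window of
 * (MAX_FETCH_BD - 3) consecutive BDs must carry at least gso_size (one MSS)
 * bytes, otherwise the FW could not fetch a full MSS from a single window
 * and the skb must be linearized. E.g. (illustrative numbers) with a 10-BD
 * window and an lso_mss of 1460, any 10 consecutive frags summing to fewer
 * than 1460 bytes trigger linearization.
 */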
10895
10896 /* called with netif_tx_lock
10897  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10898  * netif_wake_queue()
10899  */
10900 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10901 {
10902         struct bnx2x *bp = netdev_priv(dev);
10903         struct bnx2x_fastpath *fp, *fp_stat;
10904         struct netdev_queue *txq;
10905         struct sw_tx_bd *tx_buf;
10906         struct eth_tx_start_bd *tx_start_bd;
10907         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10908         struct eth_tx_parse_bd *pbd = NULL;
10909         u16 pkt_prod, bd_prod;
10910         int nbd, fp_index;
10911         dma_addr_t mapping;
10912         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10913         int i;
10914         u8 hlen = 0;
10915         __le16 pkt_size = 0;
10916
10917 #ifdef BNX2X_STOP_ON_ERROR
10918         if (unlikely(bp->panic))
10919                 return NETDEV_TX_BUSY;
10920 #endif
10921
10922         fp_index = skb_get_queue_mapping(skb);
10923         txq = netdev_get_tx_queue(dev, fp_index);
10924
10925         fp = &bp->fp[fp_index + bp->num_rx_queues];
10926         fp_stat = &bp->fp[fp_index];
10927
10928         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10929                 fp_stat->eth_q_stats.driver_xoff++;
10930                 netif_tx_stop_queue(txq);
10931                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10932                 return NETDEV_TX_BUSY;
10933         }
10934
10935         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10936            "  gso type %x  xmit_type %x\n",
10937            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10938            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10939
10940 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10941         /* First, check if we need to linearize the skb (due to FW
10942            restrictions). No need to check fragmentation if page size > 8K
10943            (there will be no violation to FW restrictions) */
10944         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10945                 /* Statistics of linearization */
10946                 bp->lin_cnt++;
10947                 if (skb_linearize(skb) != 0) {
10948                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10949                            "silently dropping this SKB\n");
10950                         dev_kfree_skb_any(skb);
10951                         return NETDEV_TX_OK;
10952                 }
10953         }
10954 #endif
10955
10956         /*
10957          * Please read carefully. First we use one BD which we mark as start,
10958          * then we have a parsing info BD (used for TSO or xsum),
10959          * and only then the rest of the TSO BDs.
10960          * (Don't forget to mark the last one as last,
10961          * and to unmap only AFTER you write to the BD ...)
10962          * And above all, all pbd sizes are in words - NOT DWORDS!
10963          */
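
        /* Resulting BD chain (a sketch of the layout described above):
         *
         *   start BD (headers) -> parse BD (pbd) -> data BD ... -> last BD
         */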
10964
10965         pkt_prod = fp->tx_pkt_prod++;
10966         bd_prod = TX_BD(fp->tx_bd_prod);
10967
10968         /* get a tx_buf and first BD */
10969         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10970         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10971
10972         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10973         tx_start_bd->general_data = (UNICAST_ADDRESS <<
10974                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10975         /* header nbd */
10976         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10977
10978         /* remember the first BD of the packet */
10979         tx_buf->first_bd = fp->tx_bd_prod;
10980         tx_buf->skb = skb;
10981         tx_buf->flags = 0;
10982
10983         DP(NETIF_MSG_TX_QUEUED,
10984            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10985            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10986
10987 #ifdef BCM_VLAN
10988         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10989             (bp->flags & HW_VLAN_TX_FLAG)) {
10990                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10991                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10992         } else
10993 #endif
10994                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10995
10996         /* turn on parsing and get a BD */
10997         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10998         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10999
11000         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11001
11002         if (xmit_type & XMIT_CSUM) {
11003                 hlen = (skb_network_header(skb) - skb->data) / 2;
11004
11005                 /* for now NS flag is not used in Linux */
11006                 pbd->global_data =
11007                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11008                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11009
11010                 pbd->ip_hlen = (skb_transport_header(skb) -
11011                                 skb_network_header(skb)) / 2;
11012
11013                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11014
11015                 pbd->total_hlen = cpu_to_le16(hlen);
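                /* pbd->total_hlen is kept in 16-bit words; convert hlen
                 * back to bytes for the header-length checks below */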
11016                 hlen = hlen * 2;
11017
11018                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11019
11020                 if (xmit_type & XMIT_CSUM_V4)
11021                         tx_start_bd->bd_flags.as_bitfield |=
11022                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11023                 else
11024                         tx_start_bd->bd_flags.as_bitfield |=
11025                                                 ETH_TX_BD_FLAGS_IPV6;
11026
11027                 if (xmit_type & XMIT_CSUM_TCP) {
11028                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11029
11030                 } else {
11031                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11032
11033                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11034
11035                         DP(NETIF_MSG_TX_QUEUED,
11036                            "hlen %d  fix %d  csum before fix %x\n",
11037                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11038
11039                         /* HW bug: fixup the CSUM */
11040                         pbd->tcp_pseudo_csum =
11041                                 bnx2x_csum_fix(skb_transport_header(skb),
11042                                                SKB_CS(skb), fix);
11043
11044                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11045                            pbd->tcp_pseudo_csum);
11046                 }
11047         }
11048
11049         mapping = pci_map_single(bp->pdev, skb->data,
11050                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11051
11052         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11053         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11054         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11055         tx_start_bd->nbd = cpu_to_le16(nbd);
11056         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11057         pkt_size = tx_start_bd->nbytes;
11058
11059         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11060            "  nbytes %d  flags %x  vlan %x\n",
11061            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11062            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11063            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11064
11065         if (xmit_type & XMIT_GSO) {
11066
11067                 DP(NETIF_MSG_TX_QUEUED,
11068                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11069                    skb->len, hlen, skb_headlen(skb),
11070                    skb_shinfo(skb)->gso_size);
11071
11072                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11073
11074                 if (unlikely(skb_headlen(skb) > hlen))
11075                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11076                                                  hlen, bd_prod, ++nbd);
11077
11078                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11079                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11080                 pbd->tcp_flags = pbd_tcp_flags(skb);
11081
11082                 if (xmit_type & XMIT_GSO_V4) {
11083                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11084                         pbd->tcp_pseudo_csum =
11085                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11086                                                           ip_hdr(skb)->daddr,
11087                                                           0, IPPROTO_TCP, 0));
11088
11089                 } else
11090                         pbd->tcp_pseudo_csum =
11091                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11092                                                         &ipv6_hdr(skb)->daddr,
11093                                                         0, IPPROTO_TCP, 0));
11094
11095                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11096         }
11097         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11098
11099         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11100                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11101
11102                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11103                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11104                 if (total_pkt_bd == NULL)
11105                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11106
11107                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11108                                        frag->size, PCI_DMA_TODEVICE);
11109
11110                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11111                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11112                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11113                 le16_add_cpu(&pkt_size, frag->size);
11114
11115                 DP(NETIF_MSG_TX_QUEUED,
11116                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11117                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11118                    le16_to_cpu(tx_data_bd->nbytes));
11119         }
11120
11121         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11122
11123         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11124
11125         /* now send a tx doorbell, counting the next-page BD
11126          * if the packet contains or ends with it
11127          */
11128         if (TX_BD_POFF(bd_prod) < nbd)
11129                 nbd++;
11130
11131         if (total_pkt_bd != NULL)
11132                 total_pkt_bd->total_pkt_bytes = pkt_size;
11133
11134         if (pbd)
11135                 DP(NETIF_MSG_TX_QUEUED,
11136                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11137                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11138                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11139                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11140                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11141
11142         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11143
11144         /*
11145          * Make sure that the BD data is updated before updating the producer
11146          * since FW might read the BD right after the producer is updated.
11147          * This is only applicable to weakly-ordered memory model archs
11148          * such as IA-64. The following barrier is also mandatory since
11149          * the FW assumes packets must have BDs.
11150          */
11151         wmb();
11152
11153         fp->tx_db.data.prod += nbd;
11154         barrier();
11155         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11156
11157         mmiowb();
11158
11159         fp->tx_bd_prod += nbd;
11160
11161         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11162                 netif_tx_stop_queue(txq);
11163                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11164                    if we put Tx into XOFF state. */
11165                 smp_mb();
11166                 fp_stat->eth_q_stats.driver_xoff++;
11167                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11168                         netif_tx_wake_queue(txq);
11169         }
11170         fp_stat->tx_pkt++;
11171
11172         return NETDEV_TX_OK;
11173 }
11174
11175 /* called with rtnl_lock */
11176 static int bnx2x_open(struct net_device *dev)
11177 {
11178         struct bnx2x *bp = netdev_priv(dev);
11179
11180         netif_carrier_off(dev);
11181
11182         bnx2x_set_power_state(bp, PCI_D0);
11183
11184         return bnx2x_nic_load(bp, LOAD_OPEN);
11185 }
11186
11187 /* called with rtnl_lock */
11188 static int bnx2x_close(struct net_device *dev)
11189 {
11190         struct bnx2x *bp = netdev_priv(dev);
11191
11192         /* Unload the driver, release IRQs */
11193         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11194         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11195                 if (!CHIP_REV_IS_SLOW(bp))
11196                         bnx2x_set_power_state(bp, PCI_D3hot);
11197
11198         return 0;
11199 }
11200
11201 /* called with netif_tx_lock from dev_mcast.c */
11202 static void bnx2x_set_rx_mode(struct net_device *dev)
11203 {
11204         struct bnx2x *bp = netdev_priv(dev);
11205         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11206         int port = BP_PORT(bp);
11207
11208         if (bp->state != BNX2X_STATE_OPEN) {
11209                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11210                 return;
11211         }
11212
11213         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11214
11215         if (dev->flags & IFF_PROMISC)
11216                 rx_mode = BNX2X_RX_MODE_PROMISC;
11217
11218         else if ((dev->flags & IFF_ALLMULTI) ||
11219                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11220                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11221
11222         else { /* some multicasts */
11223                 if (CHIP_IS_E1(bp)) {
11224                         int i, old, offset;
11225                         struct dev_mc_list *mclist;
11226                         struct mac_configuration_cmd *config =
11227                                                 bnx2x_sp(bp, mcast_config);
11228
11229                         for (i = 0, mclist = dev->mc_list;
11230                              mclist && (i < dev->mc_count);
11231                              i++, mclist = mclist->next) {
11232
11233                                 config->config_table[i].
11234                                         cam_entry.msb_mac_addr =
11235                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11236                                 config->config_table[i].
11237                                         cam_entry.middle_mac_addr =
11238                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11239                                 config->config_table[i].
11240                                         cam_entry.lsb_mac_addr =
11241                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11242                                 config->config_table[i].cam_entry.flags =
11243                                                         cpu_to_le16(port);
11244                                 config->config_table[i].
11245                                         target_table_entry.flags = 0;
11246                                 config->config_table[i].target_table_entry.
11247                                         clients_bit_vector =
11248                                                 cpu_to_le32(1 << BP_L_ID(bp));
11249                                 config->config_table[i].
11250                                         target_table_entry.vlan_id = 0;
11251
11252                                 DP(NETIF_MSG_IFUP,
11253                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11254                                    config->config_table[i].
11255                                                 cam_entry.msb_mac_addr,
11256                                    config->config_table[i].
11257                                                 cam_entry.middle_mac_addr,
11258                                    config->config_table[i].
11259                                                 cam_entry.lsb_mac_addr);
11260                         }
11261                         old = config->hdr.length;
11262                         if (old > i) {
11263                                 for (; i < old; i++) {
11264                                         if (CAM_IS_INVALID(config->
11265                                                            config_table[i])) {
11266                                                 /* already invalidated */
11267                                                 break;
11268                                         }
11269                                         /* invalidate */
11270                                         CAM_INVALIDATE(config->
11271                                                        config_table[i]);
11272                                 }
11273                         }
11274
11275                         if (CHIP_REV_IS_SLOW(bp))
11276                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11277                         else
11278                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11279
11280                         config->hdr.length = i;
11281                         config->hdr.offset = offset;
11282                         config->hdr.client_id = bp->fp->cl_id;
11283                         config->hdr.reserved1 = 0;
11284
11285                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11286                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11287                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11288                                       0);
11289                 } else { /* E1H */
11290                         /* Accept one or more multicasts */
11291                         struct dev_mc_list *mclist;
11292                         u32 mc_filter[MC_HASH_SIZE];
11293                         u32 crc, bit, regidx;
11294                         int i;
11295
11296                         memset(mc_filter, 0, sizeof(mc_filter));
11297
11298                         for (i = 0, mclist = dev->mc_list;
11299                              mclist && (i < dev->mc_count);
11300                              i++, mclist = mclist->next) {
11301
11302                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11303                                    mclist->dmi_addr);
11304
11305                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11306                                 bit = (crc >> 24) & 0xff;
11307                                 regidx = bit >> 5;
11308                                 bit &= 0x1f;
11309                                 mc_filter[regidx] |= (1 << bit);
11310                         }
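
                        /* Example (illustrative crc value): if crc32c of the
                         * address is 0x9Bxxxxxx, bit = 0x9B = 155, so regidx
                         * = 155 >> 5 = 4 and 155 & 0x1f = 27: bit 27 of
                         * mc_filter[4] is set - 256 hash bits spread over
                         * the MC_HASH_SIZE registers written below.
                         */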
11311
11312                         for (i = 0; i < MC_HASH_SIZE; i++)
11313                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11314                                        mc_filter[i]);
11315                 }
11316         }
11317
11318         bp->rx_mode = rx_mode;
11319         bnx2x_set_storm_rx_mode(bp);
11320 }
11321
11322 /* called with rtnl_lock */
11323 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11324 {
11325         struct sockaddr *addr = p;
11326         struct bnx2x *bp = netdev_priv(dev);
11327
11328         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11329                 return -EINVAL;
11330
11331         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11332         if (netif_running(dev)) {
11333                 if (CHIP_IS_E1(bp))
11334                         bnx2x_set_mac_addr_e1(bp, 1);
11335                 else
11336                         bnx2x_set_mac_addr_e1h(bp, 1);
11337         }
11338
11339         return 0;
11340 }
11341
11342 /* called with rtnl_lock */
11343 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11344                            int devad, u16 addr)
11345 {
11346         struct bnx2x *bp = netdev_priv(netdev);
11347         u16 value;
11348         int rc;
11349         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11350
11351         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11352            prtad, devad, addr);
11353
11354         if (prtad != bp->mdio.prtad) {
11355                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11356                    prtad, bp->mdio.prtad);
11357                 return -EINVAL;
11358         }
11359
11360         /* The HW expects different devad if CL22 is used */
11361         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11362
11363         bnx2x_acquire_phy_lock(bp);
11364         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11365                              devad, addr, &value);
11366         bnx2x_release_phy_lock(bp);
11367         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11368
11369         if (!rc)
11370                 rc = value;
11371         return rc;
11372 }
11373
11374 /* called with rtnl_lock */
11375 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11376                             u16 addr, u16 value)
11377 {
11378         struct bnx2x *bp = netdev_priv(netdev);
11379         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11380         int rc;
11381
11382         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11383                            " value 0x%x\n", prtad, devad, addr, value);
11384
11385         if (prtad != bp->mdio.prtad) {
11386                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11387                    prtad, bp->mdio.prtad);
11388                 return -EINVAL;
11389         }
11390
11391         /* The HW expects different devad if CL22 is used */
11392         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11393
11394         bnx2x_acquire_phy_lock(bp);
11395         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11396                               devad, addr, value);
11397         bnx2x_release_phy_lock(bp);
11398         return rc;
11399 }
11400
11401 /* called with rtnl_lock */
11402 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11403 {
11404         struct bnx2x *bp = netdev_priv(dev);
11405         struct mii_ioctl_data *mdio = if_mii(ifr);
11406
11407         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11408            mdio->phy_id, mdio->reg_num, mdio->val_in);
11409
11410         if (!netif_running(dev))
11411                 return -EAGAIN;
11412
11413         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11414 }
11415
11416 /* called with rtnl_lock */
11417 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11418 {
11419         struct bnx2x *bp = netdev_priv(dev);
11420         int rc = 0;
11421
11422         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11423             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11424                 return -EINVAL;
11425
11426         /* This does not race with packet allocation
11427          * because the actual alloc size is
11428          * only updated as part of load
11429          */
11430         dev->mtu = new_mtu;
11431
11432         if (netif_running(dev)) {
11433                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11434                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11435         }
11436
11437         return rc;
11438 }
11439
11440 static void bnx2x_tx_timeout(struct net_device *dev)
11441 {
11442         struct bnx2x *bp = netdev_priv(dev);
11443
11444 #ifdef BNX2X_STOP_ON_ERROR
11445         if (!bp->panic)
11446                 bnx2x_panic();
11447 #endif
11448         /* This allows the netif to be shut down gracefully before resetting */
11449         schedule_work(&bp->reset_task);
11450 }
11451
11452 #ifdef BCM_VLAN
11453 /* called with rtnl_lock */
11454 static void bnx2x_vlan_rx_register(struct net_device *dev,
11455                                    struct vlan_group *vlgrp)
11456 {
11457         struct bnx2x *bp = netdev_priv(dev);
11458
11459         bp->vlgrp = vlgrp;
11460
11461         /* Set flags according to the required capabilities */
11462         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11463
11464         if (dev->features & NETIF_F_HW_VLAN_TX)
11465                 bp->flags |= HW_VLAN_TX_FLAG;
11466
11467         if (dev->features & NETIF_F_HW_VLAN_RX)
11468                 bp->flags |= HW_VLAN_RX_FLAG;
11469
11470         if (netif_running(dev))
11471                 bnx2x_set_client_config(bp);
11472 }
11473
11474 #endif
11475
11476 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11477 static void poll_bnx2x(struct net_device *dev)
11478 {
11479         struct bnx2x *bp = netdev_priv(dev);
11480
11481         disable_irq(bp->pdev->irq);
11482         bnx2x_interrupt(bp->pdev->irq, dev);
11483         enable_irq(bp->pdev->irq);
11484 }
11485 #endif
11486
11487 static const struct net_device_ops bnx2x_netdev_ops = {
11488         .ndo_open               = bnx2x_open,
11489         .ndo_stop               = bnx2x_close,
11490         .ndo_start_xmit         = bnx2x_start_xmit,
11491         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11492         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11493         .ndo_validate_addr      = eth_validate_addr,
11494         .ndo_do_ioctl           = bnx2x_ioctl,
11495         .ndo_change_mtu         = bnx2x_change_mtu,
11496         .ndo_tx_timeout         = bnx2x_tx_timeout,
11497 #ifdef BCM_VLAN
11498         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11499 #endif
11500 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11501         .ndo_poll_controller    = poll_bnx2x,
11502 #endif
11503 };
11504
11505 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11506                                     struct net_device *dev)
11507 {
11508         struct bnx2x *bp;
11509         int rc;
11510
11511         SET_NETDEV_DEV(dev, &pdev->dev);
11512         bp = netdev_priv(dev);
11513
11514         bp->dev = dev;
11515         bp->pdev = pdev;
11516         bp->flags = 0;
11517         bp->func = PCI_FUNC(pdev->devfn);
11518
11519         rc = pci_enable_device(pdev);
11520         if (rc) {
11521                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11522                 goto err_out;
11523         }
11524
11525         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11526                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11527                        " aborting\n");
11528                 rc = -ENODEV;
11529                 goto err_out_disable;
11530         }
11531
11532         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11533                 printk(KERN_ERR PFX "Cannot find second PCI device"
11534                        " base address, aborting\n");
11535                 rc = -ENODEV;
11536                 goto err_out_disable;
11537         }
11538
11539         if (atomic_read(&pdev->enable_cnt) == 1) {
11540                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11541                 if (rc) {
11542                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11543                                " aborting\n");
11544                         goto err_out_disable;
11545                 }
11546
11547                 pci_set_master(pdev);
11548                 pci_save_state(pdev);
11549         }
11550
11551         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11552         if (bp->pm_cap == 0) {
11553                 printk(KERN_ERR PFX "Cannot find power management"
11554                        " capability, aborting\n");
11555                 rc = -EIO;
11556                 goto err_out_release;
11557         }
11558
11559         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11560         if (bp->pcie_cap == 0) {
11561                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11562                        " aborting\n");
11563                 rc = -EIO;
11564                 goto err_out_release;
11565         }
11566
11567         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11568                 bp->flags |= USING_DAC_FLAG;
11569                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11570                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11571                                " failed, aborting\n");
11572                         rc = -EIO;
11573                         goto err_out_release;
11574                 }
11575
11576         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11577                 printk(KERN_ERR PFX "System does not support DMA,"
11578                        " aborting\n");
11579                 rc = -EIO;
11580                 goto err_out_release;
11581         }
11582
11583         dev->mem_start = pci_resource_start(pdev, 0);
11584         dev->base_addr = dev->mem_start;
11585         dev->mem_end = pci_resource_end(pdev, 0);
11586
11587         dev->irq = pdev->irq;
11588
11589         bp->regview = pci_ioremap_bar(pdev, 0);
11590         if (!bp->regview) {
11591                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11592                 rc = -ENOMEM;
11593                 goto err_out_release;
11594         }
11595
11596         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11597                                         min_t(u64, BNX2X_DB_SIZE,
11598                                               pci_resource_len(pdev, 2)));
11599         if (!bp->doorbells) {
11600                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11601                 rc = -ENOMEM;
11602                 goto err_out_unmap;
11603         }
11604
11605         bnx2x_set_power_state(bp, PCI_D0);
11606
11607         /* clean indirect addresses */
11608         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11609                                PCICFG_VENDOR_ID_OFFSET);
11610         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11611         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11612         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11613         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11614
11615         dev->watchdog_timeo = TX_TIMEOUT;
11616
11617         dev->netdev_ops = &bnx2x_netdev_ops;
11618         dev->ethtool_ops = &bnx2x_ethtool_ops;
11619         dev->features |= NETIF_F_SG;
11620         dev->features |= NETIF_F_HW_CSUM;
11621         if (bp->flags & USING_DAC_FLAG)
11622                 dev->features |= NETIF_F_HIGHDMA;
11623         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11624         dev->features |= NETIF_F_TSO6;
11625 #ifdef BCM_VLAN
11626         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11627         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11628
11629         dev->vlan_features |= NETIF_F_SG;
11630         dev->vlan_features |= NETIF_F_HW_CSUM;
11631         if (bp->flags & USING_DAC_FLAG)
11632                 dev->vlan_features |= NETIF_F_HIGHDMA;
11633         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11634         dev->vlan_features |= NETIF_F_TSO6;
11635 #endif
11636
11637         /* get_port_hwinfo() will set prtad and mmds properly */
11638         bp->mdio.prtad = MDIO_PRTAD_NONE;
11639         bp->mdio.mmds = 0;
11640         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11641         bp->mdio.dev = dev;
11642         bp->mdio.mdio_read = bnx2x_mdio_read;
11643         bp->mdio.mdio_write = bnx2x_mdio_write;
11644
11645         return 0;
11646
11647 err_out_unmap:
11648         if (bp->regview) {
11649                 iounmap(bp->regview);
11650                 bp->regview = NULL;
11651         }
11652         if (bp->doorbells) {
11653                 iounmap(bp->doorbells);
11654                 bp->doorbells = NULL;
11655         }
11656
11657 err_out_release:
11658         if (atomic_read(&pdev->enable_cnt) == 1)
11659                 pci_release_regions(pdev);
11660
11661 err_out_disable:
11662         pci_disable_device(pdev);
11663         pci_set_drvdata(pdev, NULL);
11664
11665 err_out:
11666         return rc;
11667 }
11668
11669 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11670                                                  int *width, int *speed)
11671 {
11672         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11673
11674         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11675
11676         /* returned speed: 1 = 2.5GHz, 2 = 5GHz */
11677         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11678 }
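
/* E.g. (illustrative values) a link-control word that decodes to width 8 and
 * speed 2 is reported by bnx2x_init_one() below as "PCI-E x8 5GHz (Gen2)".
 */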
11679
11680 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11681 {
11682         const struct firmware *firmware = bp->firmware;
11683         struct bnx2x_fw_file_hdr *fw_hdr;
11684         struct bnx2x_fw_file_section *sections;
11685         u32 offset, len, num_ops;
11686         u16 *ops_offsets;
11687         int i;
11688         const u8 *fw_ver;
11689
11690         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11691                 return -EINVAL;
11692
11693         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11694         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11695
11696         /* Make sure none of the offsets and sizes make us read beyond
11697          * the end of the firmware data */
11698         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11699                 offset = be32_to_cpu(sections[i].offset);
11700                 len = be32_to_cpu(sections[i].len);
11701                 if (offset + len > firmware->size) {
11702                         printk(KERN_ERR PFX "Section %d length is out of "
11703                                             "bounds\n", i);
11704                         return -EINVAL;
11705                 }
11706         }
11707
11708         /* Likewise for the init_ops offsets */
11709         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11710         ops_offsets = (u16 *)(firmware->data + offset);
11711         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11712
11713         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11714                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11715                         printk(KERN_ERR PFX "Section offset %d is out of "
11716                                             "bounds\n", i);
11717                         return -EINVAL;
11718                 }
11719         }
11720
11721         /* Check FW version */
11722         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11723         fw_ver = firmware->data + offset;
11724         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11725             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11726             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11727             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11728                 printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
11729                                     " Should be %d.%d.%d.%d\n",
11730                        fw_ver[0], fw_ver[1], fw_ver[2],
11731                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11732                        BCM_5710_FW_MINOR_VERSION,
11733                        BCM_5710_FW_REVISION_VERSION,
11734                        BCM_5710_FW_ENGINEERING_VERSION);
11735                 return -EINVAL;
11736         }
11737
11738         return 0;
11739 }
11740
11741 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11742 {
11743         u32 i;
11744         const __be32 *source = (const __be32 *)_source;
11745         u32 *target = (u32 *)_target;
11746
11747         for (i = 0; i < n/4; i++)
11748                 target[i] = be32_to_cpu(source[i]);
11749 }
11750
11751 /*
11752    Ops array is stored in the following format:
11753    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11754  */
11755 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11756 {
11757         u32 i, j, tmp;
11758         const __be32 *source = (const __be32 *)_source;
11759         struct raw_op *target = (struct raw_op *)_target;
11760
11761         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11762                 tmp = be32_to_cpu(source[j]);
11763                 target[i].op = (tmp >> 24) & 0xff;
11764                 target[i].offset = tmp & 0xffffff;
11765                 target[i].raw_data = be32_to_cpu(source[j + 1]);
11766         }
11767 }
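
/* Example (illustrative bytes): the 8-byte record 02 01 23 45 de ad be ef
 * decodes to op = 0x02, offset = 0x012345, raw_data = 0xdeadbeef.
 */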
11768 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11769 {
11770         u32 i;
11771         u16 *target = (u16 *)_target;
11772         const __be16 *source = (const __be16 *)_source;
11773
11774         for (i = 0; i < n/2; i++)
11775                 target[i] = be16_to_cpu(source[i]);
11776 }
11777
11778 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11779         do {   \
11780                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11781                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11782                 if (!bp->arr) { \
11783                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11784                         goto lbl; \
11785                 } \
11786                 func(bp->firmware->data + \
11787                         be32_to_cpu(fw_hdr->arr.offset), \
11788                         (u8*)bp->arr, len); \
11789         } while (0)
11790
11791
11792 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11793 {
11794         char fw_file_name[40] = {0};
11795         int rc, offset;
11796         struct bnx2x_fw_file_hdr *fw_hdr;
11797
11798         /* Create a FW file name */
11799         if (CHIP_IS_E1(bp))
11800                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11801         else
11802                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11803
11804         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11805                 BCM_5710_FW_MAJOR_VERSION,
11806                 BCM_5710_FW_MINOR_VERSION,
11807                 BCM_5710_FW_REVISION_VERSION,
11808                 BCM_5710_FW_ENGINEERING_VERSION);
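
        /* e.g. "bnx2x-e1h-5.0.21.0.fw" (illustrative version numbers) */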
11809
11810         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11811
11812         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11813         if (rc) {
11814                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11815                 goto request_firmware_exit;
11816         }
11817
11818         rc = bnx2x_check_firmware(bp);
11819         if (rc) {
11820                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11821                 goto request_firmware_exit;
11822         }
11823
11824         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11825
11826         /* Initialize the pointers to the init arrays */
11827         /* Blob */
11828         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11829
11830         /* Opcodes */
11831         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11832
11833         /* Offsets */
11834         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11835
11836         /* STORMs firmware */
11837         bp->tsem_int_table_data = bp->firmware->data +
11838                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11839         bp->tsem_pram_data      = bp->firmware->data +
11840                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11841         bp->usem_int_table_data = bp->firmware->data +
11842                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11843         bp->usem_pram_data      = bp->firmware->data +
11844                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11845         bp->xsem_int_table_data = bp->firmware->data +
11846                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11847         bp->xsem_pram_data      = bp->firmware->data +
11848                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11849         bp->csem_int_table_data = bp->firmware->data +
11850                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11851         bp->csem_pram_data      = bp->firmware->data +
11852                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11853
11854         return 0;
11855 init_offsets_alloc_err:
11856         kfree(bp->init_ops);
11857 init_ops_alloc_err:
11858         kfree(bp->init_data);
11859 request_firmware_exit:
11860         release_firmware(bp->firmware);
11861
11862         return rc;
11863 }
11864
11865
11866
11867 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11868                                     const struct pci_device_id *ent)
11869 {
11870         struct net_device *dev = NULL;
11871         struct bnx2x *bp;
11872         int pcie_width, pcie_speed;
11873         int rc;
11874
11875         /* dev zeroed in init_etherdev */
11876         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11877         if (!dev) {
11878                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11879                 return -ENOMEM;
11880         }
11881
11882         bp = netdev_priv(dev);
11883         bp->msglevel = debug;
11884
11885         pci_set_drvdata(pdev, dev);
11886
11887         rc = bnx2x_init_dev(pdev, dev);
11888         if (rc < 0) {
11889                 free_netdev(dev);
11890                 return rc;
11891         }
11892
11893         rc = bnx2x_init_bp(bp);
11894         if (rc)
11895                 goto init_one_exit;
11896
11897         /* Set init arrays */
11898         rc = bnx2x_init_firmware(bp, &pdev->dev);
11899         if (rc) {
11900                 printk(KERN_ERR PFX "Error loading firmware\n");
11901                 goto init_one_exit;
11902         }
11903
11904         rc = register_netdev(dev);
11905         if (rc) {
11906                 dev_err(&pdev->dev, "Cannot register net device\n");
11907                 goto init_one_exit;
11908         }
11909
11910         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11911         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11912                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11913                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11914                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
11915                dev->base_addr, bp->pdev->irq);
11916         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11917
11918         return 0;
11919
11920 init_one_exit:
11921         if (bp->regview)
11922                 iounmap(bp->regview);
11923
11924         if (bp->doorbells)
11925                 iounmap(bp->doorbells);
11926
11927         free_netdev(dev);
11928
11929         if (atomic_read(&pdev->enable_cnt) == 1)
11930                 pci_release_regions(pdev);
11931
11932         pci_disable_device(pdev);
11933         pci_set_drvdata(pdev, NULL);
11934
11935         return rc;
11936 }
11937
11938 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11939 {
11940         struct net_device *dev = pci_get_drvdata(pdev);
11941         struct bnx2x *bp;
11942
11943         if (!dev) {
11944                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11945                 return;
11946         }
11947         bp = netdev_priv(dev);
11948
11949         unregister_netdev(dev);
11950
11951         kfree(bp->init_ops_offsets);
11952         kfree(bp->init_ops);
11953         kfree(bp->init_data);
11954         release_firmware(bp->firmware);
11955
11956         if (bp->regview)
11957                 iounmap(bp->regview);
11958
11959         if (bp->doorbells)
11960                 iounmap(bp->doorbells);
11961
11962         free_netdev(dev);
11963
11964         if (atomic_read(&pdev->enable_cnt) == 1)
11965                 pci_release_regions(pdev);
11966
11967         pci_disable_device(pdev);
11968         pci_set_drvdata(pdev, NULL);
11969 }
11970
11971 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11972 {
11973         struct net_device *dev = pci_get_drvdata(pdev);
11974         struct bnx2x *bp;
11975
11976         if (!dev) {
11977                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11978                 return -ENODEV;
11979         }
11980         bp = netdev_priv(dev);
11981
11982         rtnl_lock();
11983
11984         pci_save_state(pdev);
11985
11986         if (!netif_running(dev)) {
11987                 rtnl_unlock();
11988                 return 0;
11989         }
11990
11991         netif_device_detach(dev);
11992
11993         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11994
11995         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11996
11997         rtnl_unlock();
11998
11999         return 0;
12000 }
12001
12002 static int bnx2x_resume(struct pci_dev *pdev)
12003 {
12004         struct net_device *dev = pci_get_drvdata(pdev);
12005         struct bnx2x *bp;
12006         int rc;
12007
12008         if (!dev) {
12009                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12010                 return -ENODEV;
12011         }
12012         bp = netdev_priv(dev);
12013
12014         rtnl_lock();
12015
12016         pci_restore_state(pdev);
12017
12018         if (!netif_running(dev)) {
12019                 rtnl_unlock();
12020                 return 0;
12021         }
12022
12023         bnx2x_set_power_state(bp, PCI_D0);
12024         netif_device_attach(dev);
12025
12026         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12027
12028         rtnl_unlock();
12029
12030         return rc;
12031 }
12032
12033 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12034 {
12035         int i;
12036
12037         bp->state = BNX2X_STATE_ERROR;
12038
12039         bp->rx_mode = BNX2X_RX_MODE_NONE;
12040
12041         bnx2x_netif_stop(bp, 0);
12042
12043         del_timer_sync(&bp->timer);
12044         bp->stats_state = STATS_STATE_DISABLED;
12045         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12046
12047         /* Release IRQs */
12048         bnx2x_free_irq(bp);
12049
12050         if (CHIP_IS_E1(bp)) {
12051                 struct mac_configuration_cmd *config =
12052                                                 bnx2x_sp(bp, mcast_config);
12053
12054                 for (i = 0; i < config->hdr.length; i++)
12055                         CAM_INVALIDATE(config->config_table[i]);
12056         }
12057
12058         /* Free SKBs, SGEs, TPA pool and driver internals */
12059         bnx2x_free_skbs(bp);
12060         for_each_rx_queue(bp, i)
12061                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12062         for_each_rx_queue(bp, i)
12063                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12064         bnx2x_free_mem(bp);
12065
12066         bp->state = BNX2X_STATE_CLOSED;
12067
12068         netif_carrier_off(bp->dev);
12069
12070         return 0;
12071 }
12072
12073 static void bnx2x_eeh_recover(struct bnx2x *bp)
12074 {
12075         u32 val;
12076
12077         mutex_init(&bp->port.phy_mutex);
12078
12079         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12080         bp->link_params.shmem_base = bp->common.shmem_base;
12081         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12082
12083         if (!bp->common.shmem_base ||
12084             (bp->common.shmem_base < 0xA0000) ||
12085             (bp->common.shmem_base >= 0xC0000)) {
12086                 BNX2X_DEV_INFO("MCP not active\n");
12087                 bp->flags |= NO_MCP_FLAG;
12088                 return;
12089         }
12090
12091         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12092         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12093                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12094                 BNX2X_ERR("BAD MCP validity signature\n");
12095
12096         if (!BP_NOMCP(bp)) {
12097                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12098                               & DRV_MSG_SEQ_NUMBER_MASK);
12099                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12100         }
12101 }
12102
12103 /**
12104  * bnx2x_io_error_detected - called when a PCI error is detected
12105  * @pdev: Pointer to PCI device
12106  * @state: The current PCI channel state
12107  *
12108  * This function is called after a PCI bus error affecting
12109  * this device has been detected.
12110  */
12111 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12112                                                 pci_channel_state_t state)
12113 {
12114         struct net_device *dev = pci_get_drvdata(pdev);
12115         struct bnx2x *bp = netdev_priv(dev);
12116
12117         rtnl_lock();
12118
12119         netif_device_detach(dev);
12120
12121         if (state == pci_channel_io_perm_failure) {
12122                 rtnl_unlock();
12123                 return PCI_ERS_RESULT_DISCONNECT;
12124         }
12125
12126         if (netif_running(dev))
12127                 bnx2x_eeh_nic_unload(bp);
12128
12129         pci_disable_device(pdev);
12130
12131         rtnl_unlock();
12132
12133         /* Request a slot reset */
12134         return PCI_ERS_RESULT_NEED_RESET;
12135 }
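/*
 * The return value steers the PCI error-recovery core.  A hypothetical
 * handler distinguishing all three channel states might look like the
 * sketch below (illustrative only; bnx2x treats everything short of a
 * permanent failure as "needs reset"):
 */
#if 0	/* illustrative sketch, not built */
static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:	/* non-fatal, MMIO still works */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:	/* MMIO blocked until reset */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:	/* device is gone */
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}
#endif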
12136
12137 /**
12138  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12139  * @pdev: Pointer to PCI device
12140  *
12141  * Restart the card from scratch, as if from a cold boot.
12142  */
12143 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12144 {
12145         struct net_device *dev = pci_get_drvdata(pdev);
12146         struct bnx2x *bp = netdev_priv(dev);
12147
12148         rtnl_lock();
12149
12150         if (pci_enable_device(pdev)) {
12151                 dev_err(&pdev->dev,
12152                         "Cannot re-enable PCI device after reset\n");
12153                 rtnl_unlock();
12154                 return PCI_ERS_RESULT_DISCONNECT;
12155         }
12156
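	/*
	 * After a slot reset the device comes back with power-on default
	 * config space; re-enable bus mastering and replay the copy saved
	 * earlier with pci_save_state().
	 */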
12157         pci_set_master(pdev);
12158         pci_restore_state(pdev);
12159
12160         if (netif_running(dev))
12161                 bnx2x_set_power_state(bp, PCI_D0);
12162
12163         rtnl_unlock();
12164
12165         return PCI_ERS_RESULT_RECOVERED;
12166 }
12167
12168 /**
12169  * bnx2x_io_resume - called when traffic can start flowing again
12170  * @pdev: Pointer to PCI device
12171  *
12172  * This callback is called when the error recovery driver tells us that
12173  * it's OK to resume normal operation.
12174  */
12175 static void bnx2x_io_resume(struct pci_dev *pdev)
12176 {
12177         struct net_device *dev = pci_get_drvdata(pdev);
12178         struct bnx2x *bp = netdev_priv(dev);
12179
12180         rtnl_lock();
12181
12182         bnx2x_eeh_recover(bp);
12183
12184         if (netif_running(dev))
12185                 bnx2x_nic_load(bp, LOAD_NORMAL);
12186
12187         netif_device_attach(dev);
12188
12189         rtnl_unlock();
12190 }
12191
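/*
 * The PCI error-recovery core (AER on PCI Express, EEH on pSeries)
 * drives these callbacks in order:
 *
 *   error_detected() -> [link/slot reset] -> slot_reset() -> resume()
 *
 * bnx2x leaves the optional mmio_enabled() hook unset; see
 * Documentation/PCI/pci-error-recovery.txt for the full state machine.
 */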
12192 static struct pci_error_handlers bnx2x_err_handler = {
12193         .error_detected = bnx2x_io_error_detected,
12194         .slot_reset     = bnx2x_io_slot_reset,
12195         .resume         = bnx2x_io_resume,
12196 };
12197
12198 static struct pci_driver bnx2x_pci_driver = {
12199         .name        = DRV_MODULE_NAME,
12200         .id_table    = bnx2x_pci_tbl,
12201         .probe       = bnx2x_init_one,
12202         .remove      = __devexit_p(bnx2x_remove_one),
12203         .suspend     = bnx2x_suspend,
12204         .resume      = bnx2x_resume,
12205         .err_handler = &bnx2x_err_handler,
12206 };
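/*
 * The id_table referenced above (bnx2x_pci_tbl, defined earlier in this
 * file) is built with the PCI_DEVICE() helper instead of open-coded
 * struct initializers.  A minimal sketch of the pattern (illustrative
 * only; the real table also stores a board index in driver_data):
 */
#if 0	/* illustrative sketch, not built */
static const struct pci_device_id example_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E) },
	{ 0 }
};
#endif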
12207
12208 static int __init bnx2x_init(void)
12209 {
12210         int ret;
12211
12212         printk(KERN_INFO "%s", version);
12213
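	/*
	 * Create the slowpath workqueue before registering the driver:
	 * once ->probe() can run, the driver may start queueing work
	 * (e.g. the slowpath and reset tasks) on bnx2x_wq.
	 */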
12214         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12215         if (bnx2x_wq == NULL) {
12216                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12217                 return -ENOMEM;
12218         }
12219
12220         ret = pci_register_driver(&bnx2x_pci_driver);
12221         if (ret) {
12222                 printk(KERN_ERR PFX "Cannot register driver\n");
12223                 destroy_workqueue(bnx2x_wq);
12224         }
12225         return ret;
12226 }
12227
12228 static void __exit bnx2x_cleanup(void)
12229 {
12230         pci_unregister_driver(&bnx2x_pci_driver);
12231
12232         destroy_workqueue(bnx2x_wq);
12233 }
12234
12235 module_init(bnx2x_init);
12236 module_exit(bnx2x_cleanup);
12237
12238