bnx2x: Reporting host statistics to management FW
drivers/net/bnx2x_main.c

/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.114-1"
#define DRV_MODULE_RELDATE      "2009/07/29"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

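/* indirect register read through the GRC address/data window;
 * like bnx2x_reg_wr_ind() above - used only at init, locking is done by mcp
 */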
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

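/* scan the assert lists of all four storm processors and print any
 * valid entries; returns the number of asserts found
 */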
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

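/* dump the MCP scratchpad trace buffer: from the current mark to the
 * end of the buffer, then from the buffer start back to the mark
 */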
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

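/* dump the driver state on a fatal error: status block indices, the
 * Rx/Tx rings around the current consumers, the FW trace and the
 * storm assert lists
 */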
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

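/* enable interrupts in the HC according to the active mode
 * (MSI-X/MSI/INTx) and, on E1H, set up the leading/trailing edge
 * attention registers
 */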
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

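/* mask all interrupt sources in the HC config register */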
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

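/* turn off interrupt handling: optionally mask the HW, then wait for
 * all ISRs and the slowpath task to finish
 */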
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

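/* acknowledge a status block index to the IGU, optionally asking it
 * to update the index and change the interrupt mode
 */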
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

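/* latch the CSTORM/USTORM indices from the fastpath status block;
 * returns a bitmask telling which of the two indices changed
 */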
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

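/* read the interrupt status from the HC SIMD mask register, which
 * also acknowledges the interrupt
 */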
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

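/* reclaim Tx packets completed by the chip, up to the consumer index
 * in the status block, and wake the Tx queue if it was stopped and
 * enough descriptors became available
 */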
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}


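/* handle a ramrod completion CQE: advance the fastpath or global
 * driver state machine according to the completed command
 */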
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

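/* unmap an Rx SGE page, free it and clear the SGE entry */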
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

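/* allocate and map a page for an Rx SGE and point the SGE entry at it */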
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

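/* allocate and map an skb for an Rx BD and point the BD at it */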
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

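/* mark the SGE pages consumed by this CQE in the SGE mask and advance
 * the SGE producer over the mask elements that became fully consumed
 */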
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

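/* start a TPA aggregation: move an empty skb from the TPA pool to the
 * producer BD and park the consumer skb (the aggregation head) in the
 * pool until the aggregation is stopped
 */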
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

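/* attach the pages listed in the SGL of the CQE to the aggregated skb
 * as page fragments, replacing each page in the SGE ring as it goes
 */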
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

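/* complete a TPA aggregation: fix the IP checksum, attach the SGE
 * frags and pass the skb up the stack, or drop it if no replacement
 * skb could be allocated
 */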
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

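/* publish the Rx BD, CQE and SGE producers to the USTORM internal
 * memory so the FW can use the newly posted buffers
 */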
1422 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423                                         struct bnx2x_fastpath *fp,
1424                                         u16 bd_prod, u16 rx_comp_prod,
1425                                         u16 rx_sge_prod)
1426 {
1427         struct ustorm_eth_rx_producers rx_prods = {0};
1428         int i;
1429
1430         /* Update producers */
1431         rx_prods.bd_prod = bd_prod;
1432         rx_prods.cqe_prod = rx_comp_prod;
1433         rx_prods.sge_prod = rx_sge_prod;
1434
1435         /*
1436          * Make sure that the BD and SGE data is updated before updating the
1437          * producers since FW might read the BD/SGE right after the producer
1438          * is updated.
1439          * This is only applicable to weak-ordered memory model archs such
1440          * as IA-64. The following barrier is also mandatory since the FW
1441          * assumes the BDs it reads already have buffers attached.
1442          */
1443         wmb();
1444
1445         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446                 REG_WR(bp, BAR_USTRORM_INTMEM +
1447                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1448                        ((u32 *)&rx_prods)[i]);
1449
1450         mmiowb(); /* keep prod updates ordered */
1451
1452         DP(NETIF_MSG_RX_STATUS,
1453            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1454            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1455 }
1456
1457 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458 {
1459         struct bnx2x *bp = fp->bp;
1460         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1461         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462         int rx_pkt = 0;
1463
1464 #ifdef BNX2X_STOP_ON_ERROR
1465         if (unlikely(bp->panic))
1466                 return 0;
1467 #endif
1468
1469         /* The CQ "next element" is the same size as a regular element,
1470            so it is safe to step over it here */
1471         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473                 hw_comp_cons++;
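	/* Illustration (hypothetical page size): if each CQ page held
	 * 128 elements, element 127 of every page would be the "next
	 * page" pointer rather than a real CQE, and consumer values
	 * 127, 255, 383, ... are stepped over by the increment above.
	 */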
1474
1475         bd_cons = fp->rx_bd_cons;
1476         bd_prod = fp->rx_bd_prod;
1477         bd_prod_fw = bd_prod;
1478         sw_comp_cons = fp->rx_comp_cons;
1479         sw_comp_prod = fp->rx_comp_prod;
1480
1481         /* Memory barrier necessary as speculative reads of the rx
1482          * buffer can be ahead of the index in the status block
1483          */
1484         rmb();
1485
1486         DP(NETIF_MSG_RX_STATUS,
1487            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1488            fp->index, hw_comp_cons, sw_comp_cons);
1489
1490         while (sw_comp_cons != hw_comp_cons) {
1491                 struct sw_rx_bd *rx_buf = NULL;
1492                 struct sk_buff *skb;
1493                 union eth_rx_cqe *cqe;
1494                 u8 cqe_fp_flags;
1495                 u16 len, pad;
1496
1497                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498                 bd_prod = RX_BD(bd_prod);
1499                 bd_cons = RX_BD(bd_cons);
1500
1501                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1502                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1503
1504                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1505                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1506                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1507                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1508                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1510
1511                 /* is this a slowpath msg? */
1512                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1513                         bnx2x_sp_event(fp, cqe);
1514                         goto next_cqe;
1515
1516                 /* this is an rx packet */
1517                 } else {
1518                         rx_buf = &fp->rx_buf_ring[bd_cons];
1519                         skb = rx_buf->skb;
1520                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521                         pad = cqe->fast_path_cqe.placement_offset;
1522
1523                         /* If the CQE is marked both TPA_START and TPA_END
1524                            it is a non-TPA CQE */
1525                         if ((!fp->disable_tpa) &&
1526                             (TPA_TYPE(cqe_fp_flags) !=
1527                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1528                                 u16 queue = cqe->fast_path_cqe.queue_index;
1529
1530                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531                                         DP(NETIF_MSG_RX_STATUS,
1532                                            "calling tpa_start on queue %d\n",
1533                                            queue);
1534
1535                                         bnx2x_tpa_start(fp, queue, skb,
1536                                                         bd_cons, bd_prod);
1537                                         goto next_rx;
1538                                 }
1539
1540                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541                                         DP(NETIF_MSG_RX_STATUS,
1542                                            "calling tpa_stop on queue %d\n",
1543                                            queue);
1544
1545                                         if (!BNX2X_RX_SUM_FIX(cqe))
1546                                                 BNX2X_ERR("STOP on non-TCP "
1547                                                           "data\n");
1548
1549                                         /* This is the size of the linear
1550                                            data on this skb */
1551                                         len = le16_to_cpu(cqe->fast_path_cqe.
1552                                                                 len_on_bd);
1553                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1554                                                     len, cqe, comp_ring_cons);
1555 #ifdef BNX2X_STOP_ON_ERROR
1556                                         if (bp->panic)
1557                                                 return 0;
1558 #endif
1559
1560                                         bnx2x_update_sge_prod(fp,
1561                                                         &cqe->fast_path_cqe);
1562                                         goto next_cqe;
1563                                 }
1564                         }
1565
1566                         pci_dma_sync_single_for_device(bp->pdev,
1567                                         pci_unmap_addr(rx_buf, mapping),
1568                                                        pad + RX_COPY_THRESH,
1569                                                        PCI_DMA_FROMDEVICE);
1570                         prefetch(skb);
1571                         prefetch(((char *)(skb)) + 128);
1572
1573                         /* is this an error packet? */
1574                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1575                                 DP(NETIF_MSG_RX_ERR,
1576                                    "ERROR  flags %x  rx packet %u\n",
1577                                    cqe_fp_flags, sw_comp_cons);
1578                                 fp->eth_q_stats.rx_err_discard_pkt++;
1579                                 goto reuse_rx;
1580                         }
1581
1582                         /* Since we don't have a jumbo ring
1583                          * copy small packets if mtu > 1500
1584                          */
1585                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586                             (len <= RX_COPY_THRESH)) {
1587                                 struct sk_buff *new_skb;
1588
1589                                 new_skb = netdev_alloc_skb(bp->dev,
1590                                                            len + pad);
1591                                 if (new_skb == NULL) {
1592                                         DP(NETIF_MSG_RX_ERR,
1593                                            "ERROR  packet dropped "
1594                                            "because of alloc failure\n");
1595                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1596                                         goto reuse_rx;
1597                                 }
1598
1599                                 /* aligned copy */
1600                                 skb_copy_from_linear_data_offset(skb, pad,
1601                                                     new_skb->data + pad, len);
1602                                 skb_reserve(new_skb, pad);
1603                                 skb_put(new_skb, len);
1604
1605                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607                                 skb = new_skb;
1608
1609                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610                                 pci_unmap_single(bp->pdev,
1611                                         pci_unmap_addr(rx_buf, mapping),
1612                                                  bp->rx_buf_size,
1613                                                  PCI_DMA_FROMDEVICE);
1614                                 skb_reserve(skb, pad);
1615                                 skb_put(skb, len);
1616
1617                         } else {
1618                                 DP(NETIF_MSG_RX_ERR,
1619                                    "ERROR  packet dropped because "
1620                                    "of alloc failure\n");
1621                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1622 reuse_rx:
1623                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624                                 goto next_rx;
1625                         }
1626
1627                         skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629                         skb->ip_summed = CHECKSUM_NONE;
1630                         if (bp->rx_csum) {
1631                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1633                                 else
1634                                         fp->eth_q_stats.hw_csum_err++;
1635                         }
1636                 }
1637
1638                 skb_record_rx_queue(skb, fp->index);
1639 #ifdef BCM_VLAN
1640                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1641                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642                      PARSING_FLAGS_VLAN))
1643                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645                 else
1646 #endif
1647                         netif_receive_skb(skb);
1648
1649
1650 next_rx:
1651                 rx_buf->skb = NULL;
1652
1653                 bd_cons = NEXT_RX_IDX(bd_cons);
1654                 bd_prod = NEXT_RX_IDX(bd_prod);
1655                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656                 rx_pkt++;
1657 next_cqe:
1658                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1660
1661                 if (rx_pkt == budget)
1662                         break;
1663         } /* while */
1664
1665         fp->rx_bd_cons = bd_cons;
1666         fp->rx_bd_prod = bd_prod_fw;
1667         fp->rx_comp_cons = sw_comp_cons;
1668         fp->rx_comp_prod = sw_comp_prod;
1669
1670         /* Update producers */
1671         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672                              fp->rx_sge_prod);
1673
1674         fp->rx_pkt += rx_pkt;
1675         fp->rx_calls++;
1676
1677         return rx_pkt;
1678 }
1679
1680 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681 {
1682         struct bnx2x_fastpath *fp = fp_cookie;
1683         struct bnx2x *bp = fp->bp;
1684
1685         /* Return here if interrupt is disabled */
1686         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688                 return IRQ_HANDLED;
1689         }
1690
1691         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692            fp->index, fp->sb_id);
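	/* Disable further interrupts from this status block; they are
	 * re-enabled with IGU_INT_ENABLE once the work is done - by the
	 * NAPI poller for an Rx queue, or by the Tx branch below.
	 */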
1693         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1694
1695 #ifdef BNX2X_STOP_ON_ERROR
1696         if (unlikely(bp->panic))
1697                 return IRQ_HANDLED;
1698 #endif
1699         /* Handle Rx or Tx according to MSI-X vector */
1700         if (fp->is_rx_queue) {
1701                 prefetch(fp->rx_cons_sb);
1702                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1703
1704                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1705
1706         } else {
1707                 prefetch(fp->tx_cons_sb);
1708                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710                 bnx2x_update_fpsb_idx(fp);
1711                 rmb();
1712                 bnx2x_tx_int(fp);
1713
1714                 /* Re-enable interrupts */
1715                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719         }
1720
1721         return IRQ_HANDLED;
1722 }
1723
1724 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725 {
1726         struct bnx2x *bp = netdev_priv(dev_instance);
1727         u16 status = bnx2x_ack_int(bp);
1728         u16 mask;
1729         int i;
1730
1731         /* Return here if interrupt is shared and it's not for us */
1732         if (unlikely(status == 0)) {
1733                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734                 return IRQ_NONE;
1735         }
1736         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1737
1738         /* Return here if interrupt is disabled */
1739         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741                 return IRQ_HANDLED;
1742         }
1743
1744 #ifdef BNX2X_STOP_ON_ERROR
1745         if (unlikely(bp->panic))
1746                 return IRQ_HANDLED;
1747 #endif
1748
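	/* The acked status is a bitmask of pending status blocks: bit 0
	 * is the default (slowpath) SB and fastpath SB "sb_id" maps to
	 * bit (sb_id + 1), hence the "0x2 << fp->sb_id" test below and
	 * the 0x1 slowpath test further down.
	 */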
1749         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750                 struct bnx2x_fastpath *fp = &bp->fp[i];
1751
1752                 mask = 0x2 << fp->sb_id;
1753                 if (status & mask) {
1754                         /* Handle Rx or Tx according to SB id */
1755                         if (fp->is_rx_queue) {
1756                                 prefetch(fp->rx_cons_sb);
1757                                 prefetch(&fp->status_blk->u_status_block.
1758                                                         status_block_index);
1759
1760                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1761
1762                         } else {
1763                                 prefetch(fp->tx_cons_sb);
1764                                 prefetch(&fp->status_blk->c_status_block.
1765                                                         status_block_index);
1766
1767                                 bnx2x_update_fpsb_idx(fp);
1768                                 rmb();
1769                                 bnx2x_tx_int(fp);
1770
1771                                 /* Re-enable interrupts */
1772                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773                                              le16_to_cpu(fp->fp_u_idx),
1774                                              IGU_INT_NOP, 1);
1775                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776                                              le16_to_cpu(fp->fp_c_idx),
1777                                              IGU_INT_ENABLE, 1);
1778                         }
1779                         status &= ~mask;
1780                 }
1781         }
1782
1783
1784         if (unlikely(status & 0x1)) {
1785                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1786
1787                 status &= ~0x1;
1788                 if (!status)
1789                         return IRQ_HANDLED;
1790         }
1791
1792         if (status)
1793                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794                    status);
1795
1796         return IRQ_HANDLED;
1797 }
1798
1799 /* end of fast path */
1800
1801 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1802
1803 /* Link */
1804
1805 /*
1806  * General service functions
1807  */
1808
1809 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1810 {
1811         u32 lock_status;
1812         u32 resource_bit = (1 << resource);
1813         int func = BP_FUNC(bp);
1814         u32 hw_lock_control_reg;
1815         int cnt;
1816
1817         /* Validating that the resource is within range */
1818         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819                 DP(NETIF_MSG_HW,
1820                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822                 return -EINVAL;
1823         }
1824
1825         if (func <= 5) {
1826                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827         } else {
1828                 hw_lock_control_reg =
1829                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830         }
1831
1832         /* Validating that the resource is not already taken */
1833         lock_status = REG_RD(bp, hw_lock_control_reg);
1834         if (lock_status & resource_bit) {
1835                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1836                    lock_status, resource_bit);
1837                 return -EEXIST;
1838         }
1839
1840         /* Try for 5 seconds, every 5ms */
1841         for (cnt = 0; cnt < 1000; cnt++) {
1842                 /* Try to acquire the lock */
1843                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844                 lock_status = REG_RD(bp, hw_lock_control_reg);
1845                 if (lock_status & resource_bit)
1846                         return 0;
1847
1848                 msleep(5);
1849         }
1850         DP(NETIF_MSG_HW, "Timeout\n");
1851         return -EAGAIN;
1852 }
1853
1854 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1855 {
1856         u32 lock_status;
1857         u32 resource_bit = (1 << resource);
1858         int func = BP_FUNC(bp);
1859         u32 hw_lock_control_reg;
1860
1861         /* Validating that the resource is within range */
1862         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863                 DP(NETIF_MSG_HW,
1864                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866                 return -EINVAL;
1867         }
1868
1869         if (func <= 5) {
1870                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871         } else {
1872                 hw_lock_control_reg =
1873                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874         }
1875
1876         /* Validating that the resource is currently taken */
1877         lock_status = REG_RD(bp, hw_lock_control_reg);
1878         if (!(lock_status & resource_bit)) {
1879                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1880                    lock_status, resource_bit);
1881                 return -EFAULT;
1882         }
1883
1884         REG_WR(bp, hw_lock_control_reg, resource_bit);
1885         return 0;
1886 }
1887
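/*
 * Illustrative usage sketch only, not part of the driver: the two
 * helpers above are meant to be paired around accesses to a shared
 * hardware resource, as bnx2x_acquire/release_phy_lock() below do
 * for the MDIO lock.
 */
#if 0
static void bnx2x_hw_lock_usage_sketch(struct bnx2x *bp)
{
	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO) == 0) {
		/* ... access the shared resource ... */
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
	}
}
#endif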
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1890 {
1891         mutex_lock(&bp->port.phy_mutex);
1892
1893         if (bp->port.need_hw_lock)
1894                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1895 }
1896
1897 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1898 {
1899         if (bp->port.need_hw_lock)
1900                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1901
1902         mutex_unlock(&bp->port.phy_mutex);
1903 }
1904
1905 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906 {
1907         /* The GPIO should be swapped if swap register is set and active */
1908         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910         int gpio_shift = gpio_num +
1911                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912         u32 gpio_mask = (1 << gpio_shift);
1913         u32 gpio_reg;
1914         int value;
1915
1916         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918                 return -EINVAL;
1919         }
1920
1921         /* read GPIO value */
1922         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924         /* get the requested pin value */
1925         if ((gpio_reg & gpio_mask) == gpio_mask)
1926                 value = 1;
1927         else
1928                 value = 0;
1929
1930         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1931
1932         return value;
1933 }
1934
1935 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944
1945         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947                 return -EINVAL;
1948         }
1949
1950         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1951         /* read GPIO and mask out everything except the float bits */
1952         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1953
1954         switch (mode) {
1955         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957                    gpio_num, gpio_shift);
1958                 /* clear FLOAT and set CLR */
1959                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961                 break;
1962
1963         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965                    gpio_num, gpio_shift);
1966                 /* clear FLOAT and set SET */
1967                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969                 break;
1970
1971         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1972                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973                    gpio_num, gpio_shift);
1974                 /* set FLOAT */
1975                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976                 break;
1977
1978         default:
1979                 break;
1980         }
1981
1982         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1983         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1984
1985         return 0;
1986 }
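/*
 * Example caller (see the SPIO5/fan-failure handling further down):
 * the PHY power and reset pins are driven low with
 * bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, MISC_REGISTERS_GPIO_OUTPUT_LOW, port).
 */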
1987
1988 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read GPIO int */
2005         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010                                    "output low\n", gpio_num, gpio_shift);
2011                 /* clear SET and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018                                    "output high\n", gpio_num, gpio_shift);
2019                 /* clear CLR and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022                 break;
2023
2024         default:
2025                 break;
2026         }
2027
2028         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031         return 0;
2032 }
2033
2034 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2035 {
2036         u32 spio_mask = (1 << spio_num);
2037         u32 spio_reg;
2038
2039         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040             (spio_num > MISC_REGISTERS_SPIO_7)) {
2041                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2046         /* read SPIO and mask out everything except the float bits */
2047         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2051                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052                 /* clear FLOAT and set CLR */
2053                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055                 break;
2056
2057         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2058                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059                 /* clear FLOAT and set SET */
2060                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062                 break;
2063
2064         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066                 /* set FLOAT */
2067                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076
2077         return 0;
2078 }
2079
2080 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2081 {
2082         switch (bp->link_vars.ieee_fc &
2083                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2084         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2085                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2086                                           ADVERTISED_Pause);
2087                 break;
2088
2089         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2090                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2091                                          ADVERTISED_Pause);
2092                 break;
2093
2094         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2095                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2096                 break;
2097
2098         default:
2099                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2100                                           ADVERTISED_Pause);
2101                 break;
2102         }
2103 }
2104
2105 static void bnx2x_link_report(struct bnx2x *bp)
2106 {
2107         if (bp->state == BNX2X_STATE_DISABLED) {
2108                 netif_carrier_off(bp->dev);
2109                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110                 return;
2111         }
2112
2113         if (bp->link_vars.link_up) {
2114                 if (bp->state == BNX2X_STATE_OPEN)
2115                         netif_carrier_on(bp->dev);
2116                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2117
2118                 printk("%d Mbps ", bp->link_vars.line_speed);
2119
2120                 if (bp->link_vars.duplex == DUPLEX_FULL)
2121                         printk("full duplex");
2122                 else
2123                         printk("half duplex");
2124
2125                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2127                                 printk(", receive ");
2128                                 if (bp->link_vars.flow_ctrl &
2129                                     BNX2X_FLOW_CTRL_TX)
2130                                         printk("& transmit ");
2131                         } else {
2132                                 printk(", transmit ");
2133                         }
2134                         printk("flow control ON");
2135                 }
2136                 printk("\n");
2137
2138         } else { /* link_down */
2139                 netif_carrier_off(bp->dev);
2140                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2141         }
2142 }
2143
2144 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2145 {
2146         if (!BP_NOMCP(bp)) {
2147                 u8 rc;
2148
2149                 /* Initialize link parameters structure variables */
2150                 /* It is recommended to turn off RX FC for jumbo frames
2151                    for better performance */
2152                 if (bp->dev->mtu > 5000)
2153                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2154                 else
2155                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2156
2157                 bnx2x_acquire_phy_lock(bp);
2158
2159                 if (load_mode == LOAD_DIAG)
2160                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2161
2162                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2163
2164                 bnx2x_release_phy_lock(bp);
2165
2166                 bnx2x_calc_fc_adv(bp);
2167
2168                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2169                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2170                         bnx2x_link_report(bp);
2171                 }
2172
2173                 return rc;
2174         }
2175         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2176         return -EINVAL;
2177 }
2178
2179 static void bnx2x_link_set(struct bnx2x *bp)
2180 {
2181         if (!BP_NOMCP(bp)) {
2182                 bnx2x_acquire_phy_lock(bp);
2183                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2184                 bnx2x_release_phy_lock(bp);
2185
2186                 bnx2x_calc_fc_adv(bp);
2187         } else
2188                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2189 }
2190
2191 static void bnx2x__link_reset(struct bnx2x *bp)
2192 {
2193         if (!BP_NOMCP(bp)) {
2194                 bnx2x_acquire_phy_lock(bp);
2195                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2196                 bnx2x_release_phy_lock(bp);
2197         } else
2198                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2199 }
2200
2201 static u8 bnx2x_link_test(struct bnx2x *bp)
2202 {
2203         u8 rc;
2204
2205         bnx2x_acquire_phy_lock(bp);
2206         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2207         bnx2x_release_phy_lock(bp);
2208
2209         return rc;
2210 }
2211
2212 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2213 {
2214         u32 r_param = bp->link_vars.line_speed / 8;
2215         u32 fair_periodic_timeout_usec;
2216         u32 t_fair;
2217
2218         memset(&(bp->cmng.rs_vars), 0,
2219                sizeof(struct rate_shaping_vars_per_port));
2220         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2221
2222         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2224
2225         /* this is the threshold below which no timer arming will occur.
2226            The 1.25 coefficient makes the threshold a little bigger than
2227            the real time, to compensate for timer inaccuracy */
2228         bp->cmng.rs_vars.rs_threshold =
2229                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2230
2231         /* resolution of fairness timer */
2232         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2233         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2234         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2235
2236         /* this is the threshold below which we won't arm the timer anymore */
2237         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2238
2239         /* we multiply by 1e3/8 to get bytes/msec.
2240            We don't want the credits to exceed t_fair*FAIR_MEM
2241            (the algorithm resolution) */
2242         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2243         /* since each tick is 4 usec */
2244         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2245 }
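/*
 * Worked example for the function above (illustrative numbers): at a
 * 10G line rate r_param = 10000/8 = 1250 bytes/usec, so with the
 * 100 usec rate-shaping period rs_threshold works out to
 * 100 * 1250 * 5/4 = 156250 bytes.
 */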
2246
2247 /* Calculates the sum of vn_min_rates.
2248    It's needed for further normalizing of the min_rates.
2249    Returns:
2250      sum of vn_min_rates.
2251        or
2252      0 - if all the min_rates are 0.
2253      In the latter case the fairness algorithm should be deactivated.
2254      If not all min_rates are zero then those that are zero will be set to 1.
2255  */
2256 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2257 {
2258         int all_zero = 1;
2259         int port = BP_PORT(bp);
2260         int vn;
2261
2262         bp->vn_weight_sum = 0;
2263         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2264                 int func = 2*vn + port;
2265                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2266                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2267                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2268
2269                 /* Skip hidden vns */
2270                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2271                         continue;
2272
2273                 /* If min rate is zero - set it to 1 */
2274                 if (!vn_min_rate)
2275                         vn_min_rate = DEF_MIN_RATE;
2276                 else
2277                         all_zero = 0;
2278
2279                 bp->vn_weight_sum += vn_min_rate;
2280         }
2281
2282         /* ... only if all min rates are zeros - disable fairness */
2283         if (all_zero)
2284                 bp->vn_weight_sum = 0;
2285 }
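/*
 * Example (hypothetical configuration): if the four vns have min rates
 * 0, 0, 2500 and 7500, the two zeroes are bumped to DEF_MIN_RATE and
 * vn_weight_sum becomes 2500 + 7500 + 2*DEF_MIN_RATE; only when all
 * four are zero does vn_weight_sum stay 0 and fairness get disabled.
 */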
2286
2287 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2288 {
2289         struct rate_shaping_vars_per_vn m_rs_vn;
2290         struct fairness_vars_per_vn m_fair_vn;
2291         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2292         u16 vn_min_rate, vn_max_rate;
2293         int i;
2294
2295         /* If the function is hidden - set min and max to zero */
2296         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2297                 vn_min_rate = 0;
2298                 vn_max_rate = 0;
2299
2300         } else {
2301                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2302                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2303                 /* If fairness is enabled (not all min rates are zero) and
2304                    the current min rate is zero - set it to 1.
2305                    This is a requirement of the algorithm. */
2306                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2307                         vn_min_rate = DEF_MIN_RATE;
2308                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2309                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2310         }
2311
2312         DP(NETIF_MSG_IFUP,
2313            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2314            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2315
2316         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2317         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2318
2319         /* global vn counter - maximal Mbps for this vn */
2320         m_rs_vn.vn_counter.rate = vn_max_rate;
2321
2322         /* quota - number of bytes transmitted in this period */
2323         m_rs_vn.vn_counter.quota =
2324                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2325
2326         if (bp->vn_weight_sum) {
2327                 /* credit for each period of the fairness algorithm:
2328                    the number of bytes in T_FAIR (the vns share the port
2329                    rate). vn_weight_sum should not be larger than 10000,
2330                    thus T_FAIR_COEF / (8 * vn_weight_sum) will always be
2331                    greater than zero */
2332                 m_fair_vn.vn_credit_delta =
2333                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2334                                                  (8 * bp->vn_weight_sum))),
2335                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2336                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2337                    m_fair_vn.vn_credit_delta);
2338         }
2339
2340         /* Store it to internal memory */
2341         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2342                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2343                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2344                        ((u32 *)(&m_rs_vn))[i]);
2345
2346         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2347                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2348                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2349                        ((u32 *)(&m_fair_vn))[i]);
2350 }
2351
2352
2353 /* This function is called upon link interrupt */
2354 static void bnx2x_link_attn(struct bnx2x *bp)
2355 {
2356         /* Make sure that we are synced with the current statistics */
2357         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2358
2359         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2360
2361         if (bp->link_vars.link_up) {
2362
2363                 /* dropless flow control */
2364                 if (CHIP_IS_E1H(bp)) {
2365                         int port = BP_PORT(bp);
2366                         u32 pause_enabled = 0;
2367
2368                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2369                                 pause_enabled = 1;
2370
2371                         REG_WR(bp, BAR_USTRORM_INTMEM +
2372                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2373                                pause_enabled);
2374                 }
2375
2376                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2377                         struct host_port_stats *pstats;
2378
2379                         pstats = bnx2x_sp(bp, port_stats);
2380                         /* reset old bmac stats */
2381                         memset(&(pstats->mac_stx[0]), 0,
2382                                sizeof(struct mac_stx));
2383                 }
2384                 if ((bp->state == BNX2X_STATE_OPEN) ||
2385                     (bp->state == BNX2X_STATE_DISABLED))
2386                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387         }
2388
2389         /* indicate link status */
2390         bnx2x_link_report(bp);
2391
2392         if (IS_E1HMF(bp)) {
2393                 int port = BP_PORT(bp);
2394                 int func;
2395                 int vn;
2396
2397                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2398                         if (vn == BP_E1HVN(bp))
2399                                 continue;
2400
2401                         func = ((vn << 1) | port);
2402
2403                         /* Set the attention towards other drivers
2404                            on the same port */
2405                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2406                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2407                 }
2408
2409                 if (bp->link_vars.link_up) {
2410                         int i;
2411
2412                         /* Init rate shaping and fairness contexts */
2413                         bnx2x_init_port_minmax(bp);
2414
2415                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2416                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2417
2418                         /* Store it to internal memory */
2419                         for (i = 0;
2420                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2421                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2422                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2423                                        ((u32 *)(&bp->cmng))[i]);
2424                 }
2425         }
2426 }
2427
2428 static void bnx2x__link_status_update(struct bnx2x *bp)
2429 {
2430         int func = BP_FUNC(bp);
2431
2432         if (bp->state != BNX2X_STATE_OPEN)
2433                 return;
2434
2435         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2436
2437         if (bp->link_vars.link_up)
2438                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2439         else
2440                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2441
2442         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2443         bnx2x_calc_vn_weight_sum(bp);
2444
2445         /* indicate link status */
2446         bnx2x_link_report(bp);
2447 }
2448
2449 static void bnx2x_pmf_update(struct bnx2x *bp)
2450 {
2451         int port = BP_PORT(bp);
2452         u32 val;
2453
2454         bp->port.pmf = 1;
2455         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2456
2457         /* enable nig attention */
2458         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2459         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2460         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2461
2462         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2463 }
2464
2465 /* end of Link */
2466
2467 /* slow path */
2468
2469 /*
2470  * General service functions
2471  */
2472
2473 /* send the MCP a request, block until there is a reply */
2474 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2475 {
2476         int func = BP_FUNC(bp);
2477         u32 seq = ++bp->fw_seq;
2478         u32 rc = 0;
2479         u32 cnt = 1;
2480         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2481
2482         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2483         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2484
2485         do {
2486                 /* let the FW do its magic ... */
2487                 msleep(delay);
2488
2489                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2490
2491                 /* Give the FW up to 2 seconds (200*10ms) */
2492         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2493
2494         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495            cnt*delay, rc, seq);
2496
2497         /* is this a reply to our command? */
2498         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2499                 rc &= FW_MSG_CODE_MASK;
2500         else {
2501                 /* FW BUG! */
2502                 BNX2X_ERR("FW failed to respond!\n");
2503                 bnx2x_fw_dump(bp);
2504                 rc = 0;
2505         }
2506
2507         return rc;
2508 }
2509
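/*
 * Illustrative usage sketch only (hypothetical call site): a zero
 * return means the MCP never replied, or replied out of sequence.
 */
#if 0
	{
		u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

		if (!load_code)
			BNX2X_ERR("MCP response failure\n");
	}
#endif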
2510 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512 static void bnx2x_set_rx_mode(struct net_device *dev);
2513
2514 static void bnx2x_e1h_disable(struct bnx2x *bp)
2515 {
2516         int port = BP_PORT(bp);
2517         int i;
2518
2519         bp->rx_mode = BNX2X_RX_MODE_NONE;
2520         bnx2x_set_storm_rx_mode(bp);
2521
2522         netif_tx_disable(bp->dev);
2523         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2524
2525         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2526
2527         bnx2x_set_mac_addr_e1h(bp, 0);
2528
2529         for (i = 0; i < MC_HASH_SIZE; i++)
2530                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2531
2532         netif_carrier_off(bp->dev);
2533 }
2534
2535 static void bnx2x_e1h_enable(struct bnx2x *bp)
2536 {
2537         int port = BP_PORT(bp);
2538
2539         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2540
2541         bnx2x_set_mac_addr_e1h(bp, 1);
2542
2543         /* Tx queues should only be re-enabled */
2544         netif_tx_wake_all_queues(bp->dev);
2545
2546         /* Initialize the receive filter. */
2547         bnx2x_set_rx_mode(bp->dev);
2548 }
2549
2550 static void bnx2x_update_min_max(struct bnx2x *bp)
2551 {
2552         int port = BP_PORT(bp);
2553         int vn, i;
2554
2555         /* Init rate shaping and fairness contexts */
2556         bnx2x_init_port_minmax(bp);
2557
2558         bnx2x_calc_vn_weight_sum(bp);
2559
2560         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2561                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2562
2563         if (bp->port.pmf) {
2564                 int func;
2565
2566                 /* Set the attention towards other drivers on the same port */
2567                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2568                         if (vn == BP_E1HVN(bp))
2569                                 continue;
2570
2571                         func = ((vn << 1) | port);
2572                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2573                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2574                 }
2575
2576                 /* Store it to internal memory */
2577                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2578                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2579                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2580                                ((u32 *)(&bp->cmng))[i]);
2581         }
2582 }
2583
2584 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2585 {
2586         int func = BP_FUNC(bp);
2587
2588         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2589         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2590
2591         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2592
2593                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2594                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2595                         bp->state = BNX2X_STATE_DISABLED;
2596
2597                         bnx2x_e1h_disable(bp);
2598                 } else {
2599                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2600                         bp->state = BNX2X_STATE_OPEN;
2601
2602                         bnx2x_e1h_enable(bp);
2603                 }
2604                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2605         }
2606         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2607
2608                 bnx2x_update_min_max(bp);
2609                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2610         }
2611
2612         /* Report results to MCP */
2613         if (dcc_event)
2614                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2615         else
2616                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2617 }
2618
2619 /* the slow path queue is odd since completions arrive on the fastpath ring */
2620 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2621                          u32 data_hi, u32 data_lo, int common)
2622 {
2623         int func = BP_FUNC(bp);
2624
2625         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2626            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2627            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2628            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2629            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2630
2631 #ifdef BNX2X_STOP_ON_ERROR
2632         if (unlikely(bp->panic))
2633                 return -EIO;
2634 #endif
2635
2636         spin_lock_bh(&bp->spq_lock);
2637
2638         if (!bp->spq_left) {
2639                 BNX2X_ERR("BUG! SPQ ring full!\n");
2640                 spin_unlock_bh(&bp->spq_lock);
2641                 bnx2x_panic();
2642                 return -EBUSY;
2643         }
2644
2645         /* CID needs the port number to be encoded in it */
2646         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2647                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2648                                      HW_CID(bp, cid)));
2649         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2650         if (common)
2651                 bp->spq_prod_bd->hdr.type |=
2652                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2653
2654         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2655         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2656
2657         bp->spq_left--;
2658
2659         if (bp->spq_prod_bd == bp->spq_last_bd) {
2660                 bp->spq_prod_bd = bp->spq;
2661                 bp->spq_prod_idx = 0;
2662                 DP(NETIF_MSG_TIMER, "end of spq\n");
2663
2664         } else {
2665                 bp->spq_prod_bd++;
2666                 bp->spq_prod_idx++;
2667         }
2668
2669         /* Make sure that BD data is updated before writing the producer */
2670         wmb();
2671
2672         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2673                bp->spq_prod_idx);
2674
2675         mmiowb();
2676
2677         spin_unlock_bh(&bp->spq_lock);
2678         return 0;
2679 }
2680
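/*
 * Illustrative usage sketch only (hypothetical command and cid): a
 * ramrod posted here completes on the fastpath ring, where
 * bnx2x_sp_event() picks it up.
 */
#if 0
	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, cid, 0, 0, 1))
		BNX2X_ERR("slow path post failed\n");
#endif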
2681 /* acquire split MCP access lock register */
2682 static int bnx2x_acquire_alr(struct bnx2x *bp)
2683 {
2684         u32 i, j, val;
2685         int rc = 0;
2686
2687         might_sleep();
2688         i = 100;
2689         for (j = 0; j < i*10; j++) {
2690                 val = (1UL << 31);
2691                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2692                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2693                 if (val & (1L << 31))
2694                         break;
2695
2696                 msleep(5);
2697         }
2698         if (!(val & (1L << 31))) {
2699                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2700                 rc = -EBUSY;
2701         }
2702
2703         return rc;
2704 }
2705
2706 /* release split MCP access lock register */
2707 static void bnx2x_release_alr(struct bnx2x *bp)
2708 {
2709         u32 val = 0;
2710
2711         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2712 }
2713
2714 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2715 {
2716         struct host_def_status_block *def_sb = bp->def_status_blk;
2717         u16 rc = 0;
2718
2719         barrier(); /* status block is written to by the chip */
2720         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2721                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2722                 rc |= 1;
2723         }
2724         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2725                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2726                 rc |= 2;
2727         }
2728         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2729                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2730                 rc |= 4;
2731         }
2732         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2733                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2734                 rc |= 8;
2735         }
2736         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2737                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2738                 rc |= 16;
2739         }
2740         return rc;
2741 }
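/*
 * The value returned above is a bitmask of which default-SB indices
 * changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.
 */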
2742
2743 /*
2744  * slow path service functions
2745  */
2746
2747 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2748 {
2749         int port = BP_PORT(bp);
2750         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2751                        COMMAND_REG_ATTN_BITS_SET);
2752         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2753                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2754         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2755                                        NIG_REG_MASK_INTERRUPT_PORT0;
2756         u32 aeu_mask;
2757         u32 nig_mask = 0;
2758
2759         if (bp->attn_state & asserted)
2760                 BNX2X_ERR("IGU ERROR\n");
2761
2762         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763         aeu_mask = REG_RD(bp, aeu_addr);
2764
2765         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2766            aeu_mask, asserted);
2767         aeu_mask &= ~(asserted & 0xff);
2768         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2769
2770         REG_WR(bp, aeu_addr, aeu_mask);
2771         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2772
2773         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2774         bp->attn_state |= asserted;
2775         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2776
2777         if (asserted & ATTN_HARD_WIRED_MASK) {
2778                 if (asserted & ATTN_NIG_FOR_FUNC) {
2779
2780                         bnx2x_acquire_phy_lock(bp);
2781
2782                         /* save nig interrupt mask */
2783                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2784                         REG_WR(bp, nig_int_mask_addr, 0);
2785
2786                         bnx2x_link_attn(bp);
2787
2788                         /* handle unicore attn? */
2789                 }
2790                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2791                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2792
2793                 if (asserted & GPIO_2_FUNC)
2794                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2795
2796                 if (asserted & GPIO_3_FUNC)
2797                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2798
2799                 if (asserted & GPIO_4_FUNC)
2800                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2801
2802                 if (port == 0) {
2803                         if (asserted & ATTN_GENERAL_ATTN_1) {
2804                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2805                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2806                         }
2807                         if (asserted & ATTN_GENERAL_ATTN_2) {
2808                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2809                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2810                         }
2811                         if (asserted & ATTN_GENERAL_ATTN_3) {
2812                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2813                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2814                         }
2815                 } else {
2816                         if (asserted & ATTN_GENERAL_ATTN_4) {
2817                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2818                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2819                         }
2820                         if (asserted & ATTN_GENERAL_ATTN_5) {
2821                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2822                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2823                         }
2824                         if (asserted & ATTN_GENERAL_ATTN_6) {
2825                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2826                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2827                         }
2828                 }
2829
2830         } /* if hardwired */
2831
2832         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833            asserted, hc_addr);
2834         REG_WR(bp, hc_addr, asserted);
2835
2836         /* now set back the mask */
2837         if (asserted & ATTN_NIG_FOR_FUNC) {
2838                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2839                 bnx2x_release_phy_lock(bp);
2840         }
2841 }
2842
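/*
 * record the external PHY as failed in the shmem port config and log why
 * the card was shut down
 */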
2843 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2844 {
2845         int port = BP_PORT(bp);
2846
2847         /* mark the failure */
2848         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851                  bp->link_params.ext_phy_config);
2852
2853         /* log the failure */
2854         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855                " the driver to shut down the card to prevent permanent"
2856                " damage.  Please contact Dell Support for assistance\n",
2857                bp->dev->name);
2858 }
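
/*
 * group 0 deasserted attentions: SPIO5 (fan failure), GPIO3 module
 * detection and fatal HW block attentions
 */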
2859 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2860 {
2861         int port = BP_PORT(bp);
2862         int reg_offset;
2863         u32 val, swap_val, swap_override;
2864
2865         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2866                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2867
2868         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2869
2870                 val = REG_RD(bp, reg_offset);
2871                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2872                 REG_WR(bp, reg_offset, val);
2873
2874                 BNX2X_ERR("SPIO5 hw attention\n");
2875
2876                 /* Fan failure attention */
2877                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2878                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2879                         /* Low power mode is controlled by GPIO 2 */
2880                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2881                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2882                         /* The PHY reset is controlled by GPIO 1 */
2883                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2884                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2885                         break;
2886
2887                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2888                         /* The PHY reset is controlled by GPIO 1 */
2889                         /* fake the port number to cancel the swap done in
2890                            set_gpio() */
2891                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2892                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2893                         port = (swap_val && swap_override) ^ 1;
2894                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2895                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2896                         break;
2897
2898                 default:
2899                         break;
2900                 }
2901                 bnx2x_fan_failure(bp);
2902         }
2903
2904         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2905                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2906                 bnx2x_acquire_phy_lock(bp);
2907                 bnx2x_handle_module_detect_int(&bp->link_params);
2908                 bnx2x_release_phy_lock(bp);
2909         }
2910
2911         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2912
2913                 val = REG_RD(bp, reg_offset);
2914                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2915                 REG_WR(bp, reg_offset, val);
2916
2917                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918                           (attn & HW_INTERRUT_ASSERT_SET_0));
2919                 bnx2x_panic();
2920         }
2921 }
2922
2923 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2924 {
2925         u32 val;
2926
2927         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2928
2929                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2930                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2931                 /* DORQ discard attention */
2932                 if (val & 0x2)
2933                         BNX2X_ERR("FATAL error from DORQ\n");
2934         }
2935
2936         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2937
2938                 int port = BP_PORT(bp);
2939                 int reg_offset;
2940
2941                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2942                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2943
2944                 val = REG_RD(bp, reg_offset);
2945                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2946                 REG_WR(bp, reg_offset, val);
2947
2948                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949                           (attn & HW_INTERRUT_ASSERT_SET_1));
2950                 bnx2x_panic();
2951         }
2952 }
2953
2954 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2955 {
2956         u32 val;
2957
2958         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2959
2960                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2961                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2962                 /* CFC error attention */
2963                 if (val & 0x2)
2964                         BNX2X_ERR("FATAL error from CFC\n");
2965         }
2966
2967         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2968
2969                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2970                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2971                 /* RQ_USDMDP_FIFO_OVERFLOW */
2972                 if (val & 0x18000)
2973                         BNX2X_ERR("FATAL error from PXP\n");
2974         }
2975
2976         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2977
2978                 int port = BP_PORT(bp);
2979                 int reg_offset;
2980
2981                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2982                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2983
2984                 val = REG_RD(bp, reg_offset);
2985                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2986                 REG_WR(bp, reg_offset, val);
2987
2988                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989                           (attn & HW_INTERRUT_ASSERT_SET_2));
2990                 bnx2x_panic();
2991         }
2992 }
2993
2994 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2995 {
2996         u32 val;
2997
2998         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2999
3000                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3001                         int func = BP_FUNC(bp);
3002
3003                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3004                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3005                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3006                                 bnx2x_dcc_event(bp,
3007                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3008                         bnx2x__link_status_update(bp);
3009                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3010                                 bnx2x_pmf_update(bp);
3011
3012                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3013
3014                         BNX2X_ERR("MC assert!\n");
3015                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3016                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3017                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3018                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3019                         bnx2x_panic();
3020
3021                 } else if (attn & BNX2X_MCP_ASSERT) {
3022
3023                         BNX2X_ERR("MCP assert!\n");
3024                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3025                         bnx2x_fw_dump(bp);
3026
3027                 } else
3028                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3029         }
3030
3031         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3032                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3033                 if (attn & BNX2X_GRC_TIMEOUT) {
3034                         val = CHIP_IS_E1H(bp) ?
3035                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3036                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3037                 }
3038                 if (attn & BNX2X_GRC_RSV) {
3039                         val = CHIP_IS_E1H(bp) ?
3040                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3041                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3042                 }
3043                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3044         }
3045 }
3046
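/*
 * handle deasserted attentions: read the after-invert AEU signals, run
 * the per-group handlers for each group that deasserted, then ack the
 * bits in the HC and unmask them again in the AEU
 */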
3047 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3048 {
3049         struct attn_route attn;
3050         struct attn_route group_mask;
3051         int port = BP_PORT(bp);
3052         int index;
3053         u32 reg_addr;
3054         u32 val;
3055         u32 aeu_mask;
3056
3057         /* need to take HW lock because MCP or other port might also
3058            try to handle this event */
3059         bnx2x_acquire_alr(bp);
3060
3061         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3062         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3063         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3064         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3065         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3066            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3067
3068         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3069                 if (deasserted & (1 << index)) {
3070                         group_mask = bp->attn_group[index];
3071
3072                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3073                            index, group_mask.sig[0], group_mask.sig[1],
3074                            group_mask.sig[2], group_mask.sig[3]);
3075
3076                         bnx2x_attn_int_deasserted3(bp,
3077                                         attn.sig[3] & group_mask.sig[3]);
3078                         bnx2x_attn_int_deasserted1(bp,
3079                                         attn.sig[1] & group_mask.sig[1]);
3080                         bnx2x_attn_int_deasserted2(bp,
3081                                         attn.sig[2] & group_mask.sig[2]);
3082                         bnx2x_attn_int_deasserted0(bp,
3083                                         attn.sig[0] & group_mask.sig[0]);
3084
3085                         if ((attn.sig[0] & group_mask.sig[0] &
3086                                                 HW_PRTY_ASSERT_SET_0) ||
3087                             (attn.sig[1] & group_mask.sig[1] &
3088                                                 HW_PRTY_ASSERT_SET_1) ||
3089                             (attn.sig[2] & group_mask.sig[2] &
3090                                                 HW_PRTY_ASSERT_SET_2))
3091                                 BNX2X_ERR("FATAL HW block parity attention\n");
3092                 }
3093         }
3094
3095         bnx2x_release_alr(bp);
3096
3097         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3098
3099         val = ~deasserted;
3100         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3101            val, reg_addr);
3102         REG_WR(bp, reg_addr, val);
3103
3104         if (~bp->attn_state & deasserted)
3105                 BNX2X_ERR("IGU ERROR\n");
3106
3107         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3108                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3109
3110         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3111         aeu_mask = REG_RD(bp, reg_addr);
3112
3113         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3114            aeu_mask, deasserted);
3115         aeu_mask |= (deasserted & 0xff);
3116         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3117
3118         REG_WR(bp, reg_addr, aeu_mask);
3119         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3120
3121         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3122         bp->attn_state &= ~deasserted;
3123         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3124 }
3125
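/*
 * an attention is newly asserted if set in attn_bits but not yet acked or
 * recorded in attn_state; it is deasserted if clear in attn_bits while
 * still acked and recorded
 */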
3126 static void bnx2x_attn_int(struct bnx2x *bp)
3127 {
3128         /* read local copy of bits */
3129         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3130                                                                 attn_bits);
3131         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132                                                                 attn_bits_ack);
3133         u32 attn_state = bp->attn_state;
3134
3135         /* look for changed bits */
3136         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3137         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3138
3139         DP(NETIF_MSG_HW,
3140            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3141            attn_bits, attn_ack, asserted, deasserted);
3142
3143         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3144                 BNX2X_ERR("BAD attention state\n");
3145
3146         /* handle bits that were raised */
3147         if (asserted)
3148                 bnx2x_attn_int_asserted(bp, asserted);
3149
3150         if (deasserted)
3151                 bnx2x_attn_int_deasserted(bp, deasserted);
3152 }
3153
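/*
 * slowpath work handler: refresh the default status block indices, handle
 * HW attentions and re-ack all default SB segments, re-enabling the
 * interrupt on the last ack
 */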
3154 static void bnx2x_sp_task(struct work_struct *work)
3155 {
3156         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3157         u16 status;
3158
3159
3160         /* Return here if interrupt is disabled */
3161         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3162                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3163                 return;
3164         }
3165
3166         status = bnx2x_update_dsb_idx(bp);
3167 /*      if (status == 0)                                     */
3168 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3169
3170         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3171
3172         /* HW attentions */
3173         if (status & 0x1)
3174                 bnx2x_attn_int(bp);
3175
3176         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3177                      IGU_INT_NOP, 1);
3178         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3179                      IGU_INT_NOP, 1);
3180         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3181                      IGU_INT_NOP, 1);
3182         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3183                      IGU_INT_NOP, 1);
3184         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3185                      IGU_INT_ENABLE, 1);
3186
3187 }
3188
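/* MSI-X slowpath ISR: disable further slowpath interrupts and defer the
 * actual work to the bnx2x workqueue */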
3189 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3190 {
3191         struct net_device *dev = dev_instance;
3192         struct bnx2x *bp = netdev_priv(dev);
3193
3194         /* Return here if interrupt is disabled */
3195         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3197                 return IRQ_HANDLED;
3198         }
3199
3200         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3201
3202 #ifdef BNX2X_STOP_ON_ERROR
3203         if (unlikely(bp->panic))
3204                 return IRQ_HANDLED;
3205 #endif
3206
3207         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3208
3209         return IRQ_HANDLED;
3210 }
3211
3212 /* end of slow path */
3213
3214 /* Statistics */
3215
3216 /****************************************************************************
3217 * Macros
3218 ****************************************************************************/
3219
3220 /* sum[hi:lo] += add[hi:lo] */
3221 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3222         do { \
3223                 s_lo += a_lo; \
3224                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3225         } while (0)
3226
3227 /* difference = minuend - subtrahend; clamps to zero if subtrahend is bigger */
3228 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3229         do { \
3230                 if (m_lo < s_lo) { \
3231                         /* underflow */ \
3232                         d_hi = m_hi - s_hi; \
3233                         if (d_hi > 0) { \
3234                                 /* we can 'loan' 1 */ \
3235                                 d_hi--; \
3236                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3237                         } else { \
3238                                 /* m_hi <= s_hi */ \
3239                                 d_hi = 0; \
3240                                 d_lo = 0; \
3241                         } \
3242                 } else { \
3243                         /* m_lo >= s_lo */ \
3244                         if (m_hi < s_hi) { \
3245                                 d_hi = 0; \
3246                                 d_lo = 0; \
3247                         } else { \
3248                                 /* m_hi >= s_hi */ \
3249                                 d_hi = m_hi - s_hi; \
3250                                 d_lo = m_lo - s_lo; \
3251                         } \
3252                 } \
3253         } while (0)
3254
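/* latch the new MAC reading in mac_stx[0] and accumulate the delta into
 * the 64-bit running total in mac_stx[1]; UPDATE_STAT64_NIG below does
 * the same for NIG counters, accumulating into estats */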
3255 #define UPDATE_STAT64(s, t) \
3256         do { \
3257                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262                        pstats->mac_stx[1].t##_lo, diff.lo); \
3263         } while (0)
3264
3265 #define UPDATE_STAT64_NIG(s, t) \
3266         do { \
3267                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268                         diff.lo, new->s##_lo, old->s##_lo); \
3269                 ADD_64(estats->t##_hi, diff.hi, \
3270                        estats->t##_lo, diff.lo); \
3271         } while (0)
3272
3273 /* sum[hi:lo] += add */
3274 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3275         do { \
3276                 s_lo += a; \
3277                 s_hi += (s_lo < a) ? 1 : 0; \
3278         } while (0)
3279
3280 #define UPDATE_EXTEND_STAT(s) \
3281         do { \
3282                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283                               pstats->mac_stx[1].s##_lo, \
3284                               new->s); \
3285         } while (0)
3286
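/* the UPDATE_EXTEND_*STAT variants extend a 32-bit storm counter into a
 * 64-bit queue statistic, one variant per storm (Tstorm/Ustorm/Xstorm) */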
3287 #define UPDATE_EXTEND_TSTAT(s, t) \
3288         do { \
3289                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290                 old_tclient->s = tclient->s; \
3291                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3292         } while (0)
3293
3294 #define UPDATE_EXTEND_USTAT(s, t) \
3295         do { \
3296                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297                 old_uclient->s = uclient->s; \
3298                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3299         } while (0)
3300
3301 #define UPDATE_EXTEND_XSTAT(s, t) \
3302         do { \
3303                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304                 old_xclient->s = xclient->s; \
3305                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3306         } while (0)
3307
3308 /* minuend -= subtrahend */
3309 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3310         do { \
3311                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3312         } while (0)
3313
3314 /* minuend[hi:lo] -= subtrahend */
3315 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3316         do { \
3317                 SUB_64(m_hi, 0, m_lo, s); \
3318         } while (0)
3319
3320 #define SUB_EXTEND_USTAT(s, t) \
3321         do { \
3322                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3324         } while (0)
3325
3326 /*
3327  * General service functions
3328  */
3329
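/* return a {hi, lo} counter pair as a long: the full 64-bit value on
 * 64-bit hosts, only the low 32 bits otherwise */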
3330 static inline long bnx2x_hilo(u32 *hiref)
3331 {
3332         u32 lo = *(hiref + 1);
3333 #if (BITS_PER_LONG == 64)
3334         u32 hi = *hiref;
3335
3336         return HILO_U64(hi, lo);
3337 #else
3338         return lo;
3339 #endif
3340 }
3341
3342 /*
3343  * Init service functions
3344  */
3345
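/*
 * ask the storms for a statistics snapshot via a STAT_QUERY ramrod; the
 * incremented drv_counter tags the snapshot so bnx2x_storm_stats_update()
 * can tell whether the storms have answered yet
 */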
3346 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3347 {
3348         if (!bp->stats_pending) {
3349                 struct eth_query_ramrod_data ramrod_data = {0};
3350                 int i, rc;
3351
3352                 ramrod_data.drv_counter = bp->stats_counter++;
3353                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3354                 for_each_queue(bp, i)
3355                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3356
3357                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3358                                    ((u32 *)&ramrod_data)[1],
3359                                    ((u32 *)&ramrod_data)[0], 0);
3360                 if (rc == 0) {
3361                         /* stats ramrod has its own slot on the spq */
3362                         bp->spq_left++;
3363                         bp->stats_pending = 1;
3364                 }
3365         }
3366 }
3367
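/*
 * kick off the DMAE transfers prepared by the stats init functions: if
 * commands were queued (executer_idx != 0) start them through the loader,
 * otherwise post the single function-stats command directly; completion
 * is signalled by DMAE_COMP_VAL in stats_comp
 */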
3368 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3369 {
3370         struct dmae_command *dmae = &bp->stats_dmae;
3371         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3372
3373         *stats_comp = DMAE_COMP_VAL;
3374         if (CHIP_REV_IS_SLOW(bp))
3375                 return;
3376
3377         /* loader */
3378         if (bp->executer_idx) {
3379                 int loader_idx = PMF_DMAE_C(bp);
3380
3381                 memset(dmae, 0, sizeof(struct dmae_command));
3382
3383                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3384                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3385                                 DMAE_CMD_DST_RESET |
3386 #ifdef __BIG_ENDIAN
3387                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3388 #else
3389                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3390 #endif
3391                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3392                                                DMAE_CMD_PORT_0) |
3393                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3394                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3395                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3396                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3397                                      sizeof(struct dmae_command) *
3398                                      (loader_idx + 1)) >> 2;
3399                 dmae->dst_addr_hi = 0;
3400                 dmae->len = sizeof(struct dmae_command) >> 2;
3401                 if (CHIP_IS_E1(bp))
3402                         dmae->len--;
3403                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3404                 dmae->comp_addr_hi = 0;
3405                 dmae->comp_val = 1;
3406
3407                 *stats_comp = 0;
3408                 bnx2x_post_dmae(bp, dmae, loader_idx);
3409
3410         } else if (bp->func_stx) {
3411                 *stats_comp = 0;
3412                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3413         }
3414 }
3415
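/* poll stats_comp for DMAE completion, waiting up to ~10ms before
 * complaining (note: always returns 1) */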
3416 static int bnx2x_stats_comp(struct bnx2x *bp)
3417 {
3418         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3419         int cnt = 10;
3420
3421         might_sleep();
3422         while (*stats_comp != DMAE_COMP_VAL) {
3423                 if (!cnt) {
3424                         BNX2X_ERR("timeout waiting for stats to finish\n");
3425                         break;
3426                 }
3427                 cnt--;
3428                 msleep(1);
3429         }
3430         return 1;
3431 }
3432
3433 /*
3434  * Statistics service functions
3435  */
3436
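/*
 * read the accumulated port statistics back from shmem so a newly
 * elected PMF continues from the existing totals; done in two DMAE reads
 * since one read is limited to DMAE_LEN32_RD_MAX dwords
 */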
3437 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3438 {
3439         struct dmae_command *dmae;
3440         u32 opcode;
3441         int loader_idx = PMF_DMAE_C(bp);
3442         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3443
3444         /* sanity */
3445         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3446                 BNX2X_ERR("BUG!\n");
3447                 return;
3448         }
3449
3450         bp->executer_idx = 0;
3451
3452         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3453                   DMAE_CMD_C_ENABLE |
3454                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3455 #ifdef __BIG_ENDIAN
3456                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3457 #else
3458                   DMAE_CMD_ENDIANITY_DW_SWAP |
3459 #endif
3460                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3461                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3462
3463         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3464         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3465         dmae->src_addr_lo = bp->port.port_stx >> 2;
3466         dmae->src_addr_hi = 0;
3467         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3468         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3469         dmae->len = DMAE_LEN32_RD_MAX;
3470         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3471         dmae->comp_addr_hi = 0;
3472         dmae->comp_val = 1;
3473
3474         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3475         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3476         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3477         dmae->src_addr_hi = 0;
3478         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3479                                    DMAE_LEN32_RD_MAX * 4);
3480         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3481                                    DMAE_LEN32_RD_MAX * 4);
3482         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3483         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485         dmae->comp_val = DMAE_COMP_VAL;
3486
3487         *stats_comp = 0;
3488         bnx2x_hw_stats_post(bp);
3489         bnx2x_stats_comp(bp);
3490 }
3491
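/*
 * build the PMF DMAE chain: host port and function statistics out to
 * shmem for the management FW, then the active MAC (BMAC or EMAC) and
 * NIG hardware counters into host memory; the last command signals
 * completion through stats_comp
 */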
3492 static void bnx2x_port_stats_init(struct bnx2x *bp)
3493 {
3494         struct dmae_command *dmae;
3495         int port = BP_PORT(bp);
3496         int vn = BP_E1HVN(bp);
3497         u32 opcode;
3498         int loader_idx = PMF_DMAE_C(bp);
3499         u32 mac_addr;
3500         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3501
3502         /* sanity */
3503         if (!bp->link_vars.link_up || !bp->port.pmf) {
3504                 BNX2X_ERR("BUG!\n");
3505                 return;
3506         }
3507
3508         bp->executer_idx = 0;
3509
3510         /* MCP */
3511         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3512                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3513                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3514 #ifdef __BIG_ENDIAN
3515                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3516 #else
3517                   DMAE_CMD_ENDIANITY_DW_SWAP |
3518 #endif
3519                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3520                   (vn << DMAE_CMD_E1HVN_SHIFT));
3521
3522         if (bp->port.port_stx) {
3523
3524                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525                 dmae->opcode = opcode;
3526                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3527                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3528                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3529                 dmae->dst_addr_hi = 0;
3530                 dmae->len = sizeof(struct host_port_stats) >> 2;
3531                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532                 dmae->comp_addr_hi = 0;
3533                 dmae->comp_val = 1;
3534         }
3535
3536         if (bp->func_stx) {
3537
3538                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3539                 dmae->opcode = opcode;
3540                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3541                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3542                 dmae->dst_addr_lo = bp->func_stx >> 2;
3543                 dmae->dst_addr_hi = 0;
3544                 dmae->len = sizeof(struct host_func_stats) >> 2;
3545                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3546                 dmae->comp_addr_hi = 0;
3547                 dmae->comp_val = 1;
3548         }
3549
3550         /* MAC */
3551         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3552                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3553                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3554 #ifdef __BIG_ENDIAN
3555                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3556 #else
3557                   DMAE_CMD_ENDIANITY_DW_SWAP |
3558 #endif
3559                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3560                   (vn << DMAE_CMD_E1HVN_SHIFT));
3561
3562         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3563
3564                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3565                                    NIG_REG_INGRESS_BMAC0_MEM);
3566
3567                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3568                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3569                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3570                 dmae->opcode = opcode;
3571                 dmae->src_addr_lo = (mac_addr +
3572                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3573                 dmae->src_addr_hi = 0;
3574                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3575                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3576                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3577                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3578                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3579                 dmae->comp_addr_hi = 0;
3580                 dmae->comp_val = 1;
3581
3582                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3583                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3584                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3585                 dmae->opcode = opcode;
3586                 dmae->src_addr_lo = (mac_addr +
3587                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3588                 dmae->src_addr_hi = 0;
3589                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3590                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3591                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3592                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3593                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3594                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3595                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3596                 dmae->comp_addr_hi = 0;
3597                 dmae->comp_val = 1;
3598
3599         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3600
3601                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3602
3603                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3604                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3605                 dmae->opcode = opcode;
3606                 dmae->src_addr_lo = (mac_addr +
3607                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3608                 dmae->src_addr_hi = 0;
3609                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3610                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3611                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3612                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3613                 dmae->comp_addr_hi = 0;
3614                 dmae->comp_val = 1;
3615
3616                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3617                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3618                 dmae->opcode = opcode;
3619                 dmae->src_addr_lo = (mac_addr +
3620                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3621                 dmae->src_addr_hi = 0;
3622                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3623                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3624                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3625                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3626                 dmae->len = 1;
3627                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3628                 dmae->comp_addr_hi = 0;
3629                 dmae->comp_val = 1;
3630
3631                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3632                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3633                 dmae->opcode = opcode;
3634                 dmae->src_addr_lo = (mac_addr +
3635                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3636                 dmae->src_addr_hi = 0;
3637                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3638                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3639                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3640                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3641                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3642                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3643                 dmae->comp_addr_hi = 0;
3644                 dmae->comp_val = 1;
3645         }
3646
3647         /* NIG */
3648         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3649         dmae->opcode = opcode;
3650         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3651                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3652         dmae->src_addr_hi = 0;
3653         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3654         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3655         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3656         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657         dmae->comp_addr_hi = 0;
3658         dmae->comp_val = 1;
3659
3660         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661         dmae->opcode = opcode;
3662         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3663                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3664         dmae->src_addr_hi = 0;
3665         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3666                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3667         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3668                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3669         dmae->len = (2*sizeof(u32)) >> 2;
3670         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3671         dmae->comp_addr_hi = 0;
3672         dmae->comp_val = 1;
3673
3674         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3675         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3676                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3677                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3678 #ifdef __BIG_ENDIAN
3679                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3680 #else
3681                         DMAE_CMD_ENDIANITY_DW_SWAP |
3682 #endif
3683                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3684                         (vn << DMAE_CMD_E1HVN_SHIFT));
3685         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3686                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3687         dmae->src_addr_hi = 0;
3688         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3689                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3690         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3691                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3692         dmae->len = (2*sizeof(u32)) >> 2;
3693         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3694         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3695         dmae->comp_val = DMAE_COMP_VAL;
3696
3697         *stats_comp = 0;
3698 }
3699
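/* single DMAE command copying host_func_stats to the function statistics
 * area (func_stx) in shmem */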
3700 static void bnx2x_func_stats_init(struct bnx2x *bp)
3701 {
3702         struct dmae_command *dmae = &bp->stats_dmae;
3703         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3704
3705         /* sanity */
3706         if (!bp->func_stx) {
3707                 BNX2X_ERR("BUG!\n");
3708                 return;
3709         }
3710
3711         bp->executer_idx = 0;
3712         memset(dmae, 0, sizeof(struct dmae_command));
3713
3714         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3715                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3716                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3717 #ifdef __BIG_ENDIAN
3718                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3719 #else
3720                         DMAE_CMD_ENDIANITY_DW_SWAP |
3721 #endif
3722                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3723                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3724         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3725         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3726         dmae->dst_addr_lo = bp->func_stx >> 2;
3727         dmae->dst_addr_hi = 0;
3728         dmae->len = sizeof(struct host_func_stats) >> 2;
3729         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3730         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3731         dmae->comp_val = DMAE_COMP_VAL;
3732
3733         *stats_comp = 0;
3734 }
3735
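/* (re)program the DMAE chains and fire the first HW and storm queries */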
3736 static void bnx2x_stats_start(struct bnx2x *bp)
3737 {
3738         if (bp->port.pmf)
3739                 bnx2x_port_stats_init(bp);
3740
3741         else if (bp->func_stx)
3742                 bnx2x_func_stats_init(bp);
3743
3744         bnx2x_hw_stats_post(bp);
3745         bnx2x_storm_stats_post(bp);
3746 }
3747
3748 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3749 {
3750         bnx2x_stats_comp(bp);
3751         bnx2x_stats_pmf_update(bp);
3752         bnx2x_stats_start(bp);
3753 }
3754
3755 static void bnx2x_stats_restart(struct bnx2x *bp)
3756 {
3757         bnx2x_stats_comp(bp);
3758         bnx2x_stats_start(bp);
3759 }
3760
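/* fold the freshly DMAEd BigMAC counters into the port statistics and
 * derive the pause frame totals */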
3761 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3762 {
3763         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3764         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3765         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3766         struct {
3767                 u32 lo;
3768                 u32 hi;
3769         } diff;
3770
3771         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3772         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3773         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3774         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3775         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3776         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3777         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3778         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3779         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3780         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3781         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3782         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3783         UPDATE_STAT64(tx_stat_gt127,
3784                                 tx_stat_etherstatspkts65octetsto127octets);
3785         UPDATE_STAT64(tx_stat_gt255,
3786                                 tx_stat_etherstatspkts128octetsto255octets);
3787         UPDATE_STAT64(tx_stat_gt511,
3788                                 tx_stat_etherstatspkts256octetsto511octets);
3789         UPDATE_STAT64(tx_stat_gt1023,
3790                                 tx_stat_etherstatspkts512octetsto1023octets);
3791         UPDATE_STAT64(tx_stat_gt1518,
3792                                 tx_stat_etherstatspkts1024octetsto1522octets);
3793         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3794         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3795         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3796         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3797         UPDATE_STAT64(tx_stat_gterr,
3798                                 tx_stat_dot3statsinternalmactransmiterrors);
3799         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3800
3801         estats->pause_frames_received_hi =
3802                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3803         estats->pause_frames_received_lo =
3804                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3805
3806         estats->pause_frames_sent_hi =
3807                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3808         estats->pause_frames_sent_lo =
3809                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3810 }
3811
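/* EMAC flavour of the above: extend the 32-bit hardware counters and
 * derive pause totals from XON + XOFF */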
3812 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3813 {
3814         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3815         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3816         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3817
3818         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3819         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3820         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3821         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3822         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3823         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3824         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3825         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3826         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3827         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3828         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3829         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3830         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3831         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3832         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3833         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3834         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3835         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3836         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3837         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3838         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3839         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3840         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3841         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3842         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3843         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3844         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3845         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3846         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3847         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3848         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3849
3850         estats->pause_frames_received_hi =
3851                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3852         estats->pause_frames_received_lo =
3853                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3854         ADD_64(estats->pause_frames_received_hi,
3855                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3856                estats->pause_frames_received_lo,
3857                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3858
3859         estats->pause_frames_sent_hi =
3860                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3861         estats->pause_frames_sent_lo =
3862                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3863         ADD_64(estats->pause_frames_sent_hi,
3864                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3865                estats->pause_frames_sent_lo,
3866                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3867 }
3868
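/*
 * consume a completed hardware statistics DMA: update the active MAC
 * counters, fold in the NIG BRB discard/truncate deltas, copy the result
 * into eth_stats and bump the start/end pair that marks the snapshot as
 * consistent for the shmem reader
 */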
3869 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3870 {
3871         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3872         struct nig_stats *old = &(bp->port.old_nig_stats);
3873         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3874         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3875         struct {
3876                 u32 lo;
3877                 u32 hi;
3878         } diff;
3879         u32 nig_timer_max;
3880
3881         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3882                 bnx2x_bmac_stats_update(bp);
3883
3884         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3885                 bnx2x_emac_stats_update(bp);
3886
3887         else { /* unreached */
3888                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3889                 return -1;
3890         }
3891
3892         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3893                       new->brb_discard - old->brb_discard);
3894         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3895                       new->brb_truncate - old->brb_truncate);
3896
3897         UPDATE_STAT64_NIG(egress_mac_pkt0,
3898                                         etherstatspkts1024octetsto1522octets);
3899         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3900
3901         memcpy(old, new, sizeof(struct nig_stats));
3902
3903         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3904                sizeof(struct mac_stx));
3905         estats->brb_drop_hi = pstats->brb_drop_hi;
3906         estats->brb_drop_lo = pstats->brb_drop_lo;
3907
3908         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3909
3910         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3911         if (nig_timer_max != estats->nig_timer_max) {
3912                 estats->nig_timer_max = nig_timer_max;
3913                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3914         }
3915
3916         return 0;
3917 }
3918
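/*
 * fold the per-client statistics returned by the storms into per-queue,
 * per-function and driver-global counters; returns non-zero if any storm
 * has not yet answered the last STAT_QUERY (stale stats_counter)
 */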
3919 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3920 {
3921         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3922         struct tstorm_per_port_stats *tport =
3923                                         &stats->tstorm_common.port_statistics;
3924         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3925         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3926         int i;
3927
3928         memcpy(&(fstats->total_bytes_received_hi),
3929                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3930                sizeof(struct host_func_stats) - 2*sizeof(u32));
3931         estats->error_bytes_received_hi = 0;
3932         estats->error_bytes_received_lo = 0;
3933         estats->etherstatsoverrsizepkts_hi = 0;
3934         estats->etherstatsoverrsizepkts_lo = 0;
3935         estats->no_buff_discard_hi = 0;
3936         estats->no_buff_discard_lo = 0;
3937
3938         for_each_rx_queue(bp, i) {
3939                 struct bnx2x_fastpath *fp = &bp->fp[i];
3940                 int cl_id = fp->cl_id;
3941                 struct tstorm_per_client_stats *tclient =
3942                                 &stats->tstorm_common.client_statistics[cl_id];
3943                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3944                 struct ustorm_per_client_stats *uclient =
3945                                 &stats->ustorm_common.client_statistics[cl_id];
3946                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3947                 struct xstorm_per_client_stats *xclient =
3948                                 &stats->xstorm_common.client_statistics[cl_id];
3949                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3950                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3951                 u32 diff;
3952
3953                 /* are storm stats valid? */
3954                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3955                                                         bp->stats_counter) {
3956                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3957                            "  xstorm counter (%d) != stats_counter (%d)\n",
3958                            i, xclient->stats_counter, bp->stats_counter);
3959                         return -1;
3960                 }
3961                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3962                                                         bp->stats_counter) {
3963                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3964                            "  tstorm counter (%d) != stats_counter (%d)\n",
3965                            i, tclient->stats_counter, bp->stats_counter);
3966                         return -2;
3967                 }
3968                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3969                                                         bp->stats_counter) {
3970                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3971                            "  ustorm counter (%d) != stats_counter (%d)\n",
3972                            i, uclient->stats_counter, bp->stats_counter);
3973                         return -4;
3974                 }
3975
3976                 qstats->total_bytes_received_hi =
3977                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3978                 qstats->total_bytes_received_lo =
3979                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3980
3981                 ADD_64(qstats->total_bytes_received_hi,
3982                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3983                        qstats->total_bytes_received_lo,
3984                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3985
3986                 ADD_64(qstats->total_bytes_received_hi,
3987                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3988                        qstats->total_bytes_received_lo,
3989                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3990
3991                 qstats->valid_bytes_received_hi =
3992                                         qstats->total_bytes_received_hi;
3993                 qstats->valid_bytes_received_lo =
3994                                         qstats->total_bytes_received_lo;
3995
3996                 qstats->error_bytes_received_hi =
3997                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3998                 qstats->error_bytes_received_lo =
3999                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4000
4001                 ADD_64(qstats->total_bytes_received_hi,
4002                        qstats->error_bytes_received_hi,
4003                        qstats->total_bytes_received_lo,
4004                        qstats->error_bytes_received_lo);
4005
4006                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4007                                         total_unicast_packets_received);
4008                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4009                                         total_multicast_packets_received);
4010                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4011                                         total_broadcast_packets_received);
4012                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4013                                         etherstatsoverrsizepkts);
4014                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4015
4016                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4017                                         total_unicast_packets_received);
4018                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4019                                         total_multicast_packets_received);
4020                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4021                                         total_broadcast_packets_received);
4022                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4023                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4024                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4025
4026                 qstats->total_bytes_transmitted_hi =
4027                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4028                 qstats->total_bytes_transmitted_lo =
4029                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4030
4031                 ADD_64(qstats->total_bytes_transmitted_hi,
4032                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4033                        qstats->total_bytes_transmitted_lo,
4034                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4035
4036                 ADD_64(qstats->total_bytes_transmitted_hi,
4037                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4038                        qstats->total_bytes_transmitted_lo,
4039                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4040
4041                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4042                                         total_unicast_packets_transmitted);
4043                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4044                                         total_multicast_packets_transmitted);
4045                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4046                                         total_broadcast_packets_transmitted);
4047
4048                 old_tclient->checksum_discard = tclient->checksum_discard;
4049                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4050
4051                 ADD_64(fstats->total_bytes_received_hi,
4052                        qstats->total_bytes_received_hi,
4053                        fstats->total_bytes_received_lo,
4054                        qstats->total_bytes_received_lo);
4055                 ADD_64(fstats->total_bytes_transmitted_hi,
4056                        qstats->total_bytes_transmitted_hi,
4057                        fstats->total_bytes_transmitted_lo,
4058                        qstats->total_bytes_transmitted_lo);
4059                 ADD_64(fstats->total_unicast_packets_received_hi,
4060                        qstats->total_unicast_packets_received_hi,
4061                        fstats->total_unicast_packets_received_lo,
4062                        qstats->total_unicast_packets_received_lo);
4063                 ADD_64(fstats->total_multicast_packets_received_hi,
4064                        qstats->total_multicast_packets_received_hi,
4065                        fstats->total_multicast_packets_received_lo,
4066                        qstats->total_multicast_packets_received_lo);
4067                 ADD_64(fstats->total_broadcast_packets_received_hi,
4068                        qstats->total_broadcast_packets_received_hi,
4069                        fstats->total_broadcast_packets_received_lo,
4070                        qstats->total_broadcast_packets_received_lo);
4071                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4072                        qstats->total_unicast_packets_transmitted_hi,
4073                        fstats->total_unicast_packets_transmitted_lo,
4074                        qstats->total_unicast_packets_transmitted_lo);
4075                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4076                        qstats->total_multicast_packets_transmitted_hi,
4077                        fstats->total_multicast_packets_transmitted_lo,
4078                        qstats->total_multicast_packets_transmitted_lo);
4079                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4080                        qstats->total_broadcast_packets_transmitted_hi,
4081                        fstats->total_broadcast_packets_transmitted_lo,
4082                        qstats->total_broadcast_packets_transmitted_lo);
4083                 ADD_64(fstats->valid_bytes_received_hi,
4084                        qstats->valid_bytes_received_hi,
4085                        fstats->valid_bytes_received_lo,
4086                        qstats->valid_bytes_received_lo);
4087
4088                 ADD_64(estats->error_bytes_received_hi,
4089                        qstats->error_bytes_received_hi,
4090                        estats->error_bytes_received_lo,
4091                        qstats->error_bytes_received_lo);
4092                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4093                        qstats->etherstatsoverrsizepkts_hi,
4094                        estats->etherstatsoverrsizepkts_lo,
4095                        qstats->etherstatsoverrsizepkts_lo);
4096                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4097                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4098         }
4099
4100         ADD_64(fstats->total_bytes_received_hi,
4101                estats->rx_stat_ifhcinbadoctets_hi,
4102                fstats->total_bytes_received_lo,
4103                estats->rx_stat_ifhcinbadoctets_lo);
4104
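        /* Copy the accumulated per-function totals into eth_stats; the
         * two u32s left out of the copy are the host_func_stats_start/end
         * markers that bracket struct host_func_stats.
         */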
4105         memcpy(estats, &(fstats->total_bytes_received_hi),
4106                sizeof(struct host_func_stats) - 2*sizeof(u32));
4107
4108         ADD_64(estats->etherstatsoverrsizepkts_hi,
4109                estats->rx_stat_dot3statsframestoolong_hi,
4110                estats->etherstatsoverrsizepkts_lo,
4111                estats->rx_stat_dot3statsframestoolong_lo);
4112         ADD_64(estats->error_bytes_received_hi,
4113                estats->rx_stat_ifhcinbadoctets_hi,
4114                estats->error_bytes_received_lo,
4115                estats->rx_stat_ifhcinbadoctets_lo);
4116
4117         if (bp->port.pmf) {
4118                 estats->mac_filter_discard =
4119                                 le32_to_cpu(tport->mac_filter_discard);
4120                 estats->xxoverflow_discard =
4121                                 le32_to_cpu(tport->xxoverflow_discard);
4122                 estats->brb_truncate_discard =
4123                                 le32_to_cpu(tport->brb_truncate_discard);
4124                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4125         }
4126
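        /* Bump the end marker and mirror it into the start marker;
         * matching markers presumably let the management FW tell a stable
         * snapshot from one it sampled mid-update.
         */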
4127         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4128
4129         bp->stats_pending = 0;
4130
4131         return 0;
4132 }
4133
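/* Fold the accumulated driver statistics into the standard
 * net_device_stats; bnx2x_hilo() collapses a {hi, lo} counter pair into
 * a single value (only the low word on 32-bit hosts).
 */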
4134 static void bnx2x_net_stats_update(struct bnx2x *bp)
4135 {
4136         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4137         struct net_device_stats *nstats = &bp->dev->stats;
4138         int i;
4139
4140         nstats->rx_packets =
4141                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4142                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4143                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4144
4145         nstats->tx_packets =
4146                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4147                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4148                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4149
4150         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4151
4152         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4153
4154         nstats->rx_dropped = estats->mac_discard;
4155         for_each_rx_queue(bp, i)
4156                 nstats->rx_dropped +=
4157                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4158
4159         nstats->tx_dropped = 0;
4160
4161         nstats->multicast =
4162                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4163
4164         nstats->collisions =
4165                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4166
4167         nstats->rx_length_errors =
4168                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4169                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4170         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4171                                  bnx2x_hilo(&estats->brb_truncate_hi);
4172         nstats->rx_crc_errors =
4173                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4174         nstats->rx_frame_errors =
4175                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4176         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4177         nstats->rx_missed_errors = estats->xxoverflow_discard;
4178
4179         nstats->rx_errors = nstats->rx_length_errors +
4180                             nstats->rx_over_errors +
4181                             nstats->rx_crc_errors +
4182                             nstats->rx_frame_errors +
4183                             nstats->rx_fifo_errors +
4184                             nstats->rx_missed_errors;
4185
4186         nstats->tx_aborted_errors =
4187                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4188                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4189         nstats->tx_carrier_errors =
4190                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4191         nstats->tx_fifo_errors = 0;
4192         nstats->tx_heartbeat_errors = 0;
4193         nstats->tx_window_errors = 0;
4194
4195         nstats->tx_errors = nstats->tx_aborted_errors +
4196                             nstats->tx_carrier_errors +
4197             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4198 }
4199
4200 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4201 {
4202         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4203         int i;
4204
4205         estats->driver_xoff = 0;
4206         estats->rx_err_discard_pkt = 0;
4207         estats->rx_skb_alloc_failed = 0;
4208         estats->hw_csum_err = 0;
4209         for_each_rx_queue(bp, i) {
4210                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4211
4212                 estats->driver_xoff += qstats->driver_xoff;
4213                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4214                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4215                 estats->hw_csum_err += qstats->hw_csum_err;
4216         }
4217 }
4218
4219 static void bnx2x_stats_update(struct bnx2x *bp)
4220 {
4221         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4222
4223         if (*stats_comp != DMAE_COMP_VAL)
4224                 return;
4225
4226         if (bp->port.pmf)
4227                 bnx2x_hw_stats_update(bp);
4228
4229         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4230                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4231                 bnx2x_panic();
4232                 return;
4233         }
4234
4235         bnx2x_net_stats_update(bp);
4236         bnx2x_drv_stats_update(bp);
4237
4238         if (bp->msglevel & NETIF_MSG_TIMER) {
4239                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4240                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4241                 struct tstorm_per_client_stats *old_tclient =
4242                                                         &bp->fp->old_tclient;
4243                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4244                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4245                 struct net_device_stats *nstats = &bp->dev->stats;
4246                 int i;
4247
4248                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4249                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4250                                   "  tx pkt (%lx)\n",
4251                        bnx2x_tx_avail(fp0_tx),
4252                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4253                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4254                                   "  rx pkt (%lx)\n",
4255                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4256                              fp0_rx->rx_comp_cons),
4257                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4258                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4259                                   "brb truncate %u\n",
4260                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4261                        qstats->driver_xoff,
4262                        estats->brb_drop_lo, estats->brb_truncate_lo);
4263                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4264                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4265                         "mac_discard %u  mac_filter_discard %u  "
4266                         "xxovrflow_discard %u  brb_truncate_discard %u  "
4267                         "ttl0_discard %u\n",
4268                        le32_to_cpu(old_tclient->checksum_discard),
4269                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4270                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4271                        estats->mac_discard, estats->mac_filter_discard,
4272                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4273                        le32_to_cpu(old_tclient->ttl0_discard));
4274
4275                 for_each_queue(bp, i) {
4276                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4277                                bnx2x_fp(bp, i, tx_pkt),
4278                                bnx2x_fp(bp, i, rx_pkt),
4279                                bnx2x_fp(bp, i, rx_calls));
4280                 }
4281         }
4282
4283         bnx2x_hw_stats_post(bp);
4284         bnx2x_storm_stats_post(bp);
4285 }
4286
4287 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4288 {
4289         struct dmae_command *dmae;
4290         u32 opcode;
4291         int loader_idx = PMF_DMAE_C(bp);
4292         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4293
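        /* Arm the final DMAE transfers that push the last port/function
         * statistics snapshot out to the management FW areas
         * (port_stx/func_stx) before statistics collection stops.
         */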
4294         bp->executer_idx = 0;
4295
4296         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4297                   DMAE_CMD_C_ENABLE |
4298                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4299 #ifdef __BIG_ENDIAN
4300                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4301 #else
4302                   DMAE_CMD_ENDIANITY_DW_SWAP |
4303 #endif
4304                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4305                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4306
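        /* The shared opcode selects a PCI -> GRC copy, dword swapping to
         * match host endianness, and stamps in our port and E1H VN
         * number; the completion-notification target (GRC loader vs.
         * host memory) is chosen per command below.
         */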
4307         if (bp->port.port_stx) {
4308
4309                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4310                 if (bp->func_stx)
4311                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4312                 else
4313                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4314                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4315                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4316                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4317                 dmae->dst_addr_hi = 0;
4318                 dmae->len = sizeof(struct host_port_stats) >> 2;
4319                 if (bp->func_stx) {
4320                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4321                         dmae->comp_addr_hi = 0;
4322                         dmae->comp_val = 1;
4323                 } else {
4324                         dmae->comp_addr_lo =
4325                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4326                         dmae->comp_addr_hi =
4327                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4328                         dmae->comp_val = DMAE_COMP_VAL;
4329
4330                         *stats_comp = 0;
4331                 }
4332         }
4333
4334         if (bp->func_stx) {
4335
4336                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4337                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4338                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4339                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4340                 dmae->dst_addr_lo = bp->func_stx >> 2;
4341                 dmae->dst_addr_hi = 0;
4342                 dmae->len = sizeof(struct host_func_stats) >> 2;
4343                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4344                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4345                 dmae->comp_val = DMAE_COMP_VAL;
4346
4347                 *stats_comp = 0;
4348         }
4349 }
4350
4351 static void bnx2x_stats_stop(struct bnx2x *bp)
4352 {
4353         int update = 0;
4354
4355         bnx2x_stats_comp(bp);
4356
4357         if (bp->port.pmf)
4358                 update = (bnx2x_hw_stats_update(bp) == 0);
4359
4360         update |= (bnx2x_storm_stats_update(bp) == 0);
4361
4362         if (update) {
4363                 bnx2x_net_stats_update(bp);
4364
4365                 if (bp->port.pmf)
4366                         bnx2x_port_stats_stop(bp);
4367
4368                 bnx2x_hw_stats_post(bp);
4369                 bnx2x_stats_comp(bp);
4370         }
4371 }
4372
4373 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4374 {
4375 }
4376
4377 static const struct {
4378         void (*action)(struct bnx2x *bp);
4379         enum bnx2x_stats_state next_state;
4380 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4381 /* state        event   */
4382 {
4383 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4384 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4385 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4386 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4387 },
4388 {
4389 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4390 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4391 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4392 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4393 }
4394 };
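
/* Timer, link and PMF events are funneled through this table by
 * bnx2x_stats_handle() below: e.g. the periodic timer issues
 * STATS_EVENT_UPDATE, which is a no-op while DISABLED and runs
 * bnx2x_stats_update() while ENABLED.
 */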
4395
4396 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4397 {
4398         enum bnx2x_stats_state state = bp->stats_state;
4399
4400         bnx2x_stats_stm[state][event].action(bp);
4401         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4402
4403         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4404                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4405                    state, event, bp->stats_state);
4406 }
4407
4408 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4409 {
4410         struct dmae_command *dmae;
4411         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4412
4413         /* sanity */
4414         if (!bp->port.pmf || !bp->port.port_stx) {
4415                 BNX2X_ERR("BUG!\n");
4416                 return;
4417         }
4418
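        /* DMAE the current (initially zeroed) host port statistics
         * buffer out to port_stx, giving the management FW a clean base
         * image to report from.
         */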
4419         bp->executer_idx = 0;
4420
4421         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4422         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4423                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4424                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4425 #ifdef __BIG_ENDIAN
4426                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4427 #else
4428                         DMAE_CMD_ENDIANITY_DW_SWAP |
4429 #endif
4430                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4431                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4432         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4433         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4434         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4435         dmae->dst_addr_hi = 0;
4436         dmae->len = sizeof(struct host_port_stats) >> 2;
4437         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4438         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4439         dmae->comp_val = DMAE_COMP_VAL;
4440
4441         *stats_comp = 0;
4442         bnx2x_hw_stats_post(bp);
4443         bnx2x_stats_comp(bp);
4444 }
4445
4446 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4447 {
4448         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4449         int port = BP_PORT(bp);
4450         int func;
4451         u32 func_stx;
4452
4453         /* sanity */
4454         if (!bp->port.pmf || !bp->func_stx) {
4455                 BNX2X_ERR("BUG!\n");
4456                 return;
4457         }
4458
4459         /* save our func_stx */
4460         func_stx = bp->func_stx;
4461
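        /* E1H function numbers interleave as 2 * vn + port; walk every
         * virtual NIC on this port and initialize its statistics base in
         * shared memory.
         */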
4462         for (vn = VN_0; vn < vn_max; vn++) {
4463                 func = 2*vn + port;
4464
4465                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4466                 bnx2x_func_stats_init(bp);
4467                 bnx2x_hw_stats_post(bp);
4468                 bnx2x_stats_comp(bp);
4469         }
4470
4471         /* restore our func_stx */
4472         bp->func_stx = func_stx;
4473 }
4474
4475 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4476 {
4477         struct dmae_command *dmae = &bp->stats_dmae;
4478         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4479
4480         /* sanity */
4481         if (!bp->func_stx) {
4482                 BNX2X_ERR("BUG!\n");
4483                 return;
4484         }
4485
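        /* Not the PMF: DMAE the function statistics that the management
         * FW currently holds back into func_stats_base, presumably so
         * later reports continue from the FW's running totals.
         */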
4486         bp->executer_idx = 0;
4487         memset(dmae, 0, sizeof(struct dmae_command));
4488
4489         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4490                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4491                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4492 #ifdef __BIG_ENDIAN
4493                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4494 #else
4495                         DMAE_CMD_ENDIANITY_DW_SWAP |
4496 #endif
4497                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4498                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4499         dmae->src_addr_lo = bp->func_stx >> 2;
4500         dmae->src_addr_hi = 0;
4501         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4502         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4503         dmae->len = sizeof(struct host_func_stats) >> 2;
4504         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4505         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4506         dmae->comp_val = DMAE_COMP_VAL;
4507
4508         *stats_comp = 0;
4509         bnx2x_hw_stats_post(bp);
4510         bnx2x_stats_comp(bp);
4511 }
4512
4513 static void bnx2x_stats_init(struct bnx2x *bp)
4514 {
4515         int port = BP_PORT(bp);
4516         int func = BP_FUNC(bp);
4517         int i;
4518
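        /* Latch the management FW statistics areas from shared memory
         * (when an MCP is present), snapshot the NIG counters as the
         * "old" baseline, and zero all per-queue and aggregate driver
         * statistics.
         */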
4519         bp->stats_pending = 0;
4520         bp->executer_idx = 0;
4521         bp->stats_counter = 0;
4522
4523         /* port and func stats for management */
4524         if (!BP_NOMCP(bp)) {
4525                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4526                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4527
4528         } else {
4529                 bp->port.port_stx = 0;
4530                 bp->func_stx = 0;
4531         }
4532         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4533            bp->port.port_stx, bp->func_stx);
4534
4535         /* port stats */
4536         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4537         bp->port.old_nig_stats.brb_discard =
4538                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4539         bp->port.old_nig_stats.brb_truncate =
4540                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4541         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4542                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4543         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4544                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4545
4546         /* function stats */
4547         for_each_queue(bp, i) {
4548                 struct bnx2x_fastpath *fp = &bp->fp[i];
4549
4550                 memset(&fp->old_tclient, 0,
4551                        sizeof(struct tstorm_per_client_stats));
4552                 memset(&fp->old_uclient, 0,
4553                        sizeof(struct ustorm_per_client_stats));
4554                 memset(&fp->old_xclient, 0,
4555                        sizeof(struct xstorm_per_client_stats));
4556                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4557         }
4558
4559         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4560         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4561
4562         bp->stats_state = STATS_STATE_DISABLED;
4563
4564         if (bp->port.pmf) {
4565                 if (bp->port.port_stx)
4566                         bnx2x_port_stats_base_init(bp);
4567
4568                 if (bp->func_stx)
4569                         bnx2x_func_stats_base_init(bp);
4570
4571         } else if (bp->func_stx)
4572                 bnx2x_func_stats_base_update(bp);
4573 }
4574
4575 static void bnx2x_timer(unsigned long data)
4576 {
4577         struct bnx2x *bp = (struct bnx2x *) data;
4578
4579         if (!netif_running(bp->dev))
4580                 return;
4581
4582         if (atomic_read(&bp->intr_sem) != 0)
4583                 goto timer_restart;
4584
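        /* With the poll module parameter set, the periodic timer
         * services the Tx and Rx rings instead of the interrupt handler.
         */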
4585         if (poll) {
4586                 struct bnx2x_fastpath *fp = &bp->fp[0];
4587                 int rc;
4588
4589                 bnx2x_tx_int(fp);
4590                 rc = bnx2x_rx_int(fp, 1000);
4591         }
4592
4593         if (!BP_NOMCP(bp)) {
4594                 int func = BP_FUNC(bp);
4595                 u32 drv_pulse;
4596                 u32 mcp_pulse;
4597
4598                 ++bp->fw_drv_pulse_wr_seq;
4599                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4600                 /* TBD - add SYSTEM_TIME */
4601                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4602                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4603
4604                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4605                              MCP_PULSE_SEQ_MASK);
4606                 /* The delta between driver pulse and mcp response
4607                  * should be 1 (before mcp response) or 0 (after mcp response)
4608                  */
4609                 if ((drv_pulse != mcp_pulse) &&
4610                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4611                         /* someone lost a heartbeat... */
4612                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4613                                   drv_pulse, mcp_pulse);
4614                 }
4615         }
4616
4617         if ((bp->state == BNX2X_STATE_OPEN) ||
4618             (bp->state == BNX2X_STATE_DISABLED))
4619                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4620
4621 timer_restart:
4622         mod_timer(&bp->timer, jiffies + bp->current_interval);
4623 }
4624
4625 /* end of Statistics */
4626
4627 /* nic init */
4628
4629 /*
4630  * nic init service functions
4631  */
4632
4633 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4634 {
4635         int port = BP_PORT(bp);
4636
4637         /* "CSTORM" */
4638         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4639                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4640                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4641         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4642                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4643                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4644 }
4645
4646 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4647                           dma_addr_t mapping, int sb_id)
4648 {
4649         int port = BP_PORT(bp);
4650         int func = BP_FUNC(bp);
4651         int index;
4652         u64 section;
4653
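        /* Publish the DMA address of each sub-block of the host status
         * block into CSTORM internal memory, tag it with our function,
         * and leave host coalescing disabled on every index (the
         * REG_WR16(..., 1) writes below); bnx2x_update_coalesce()
         * re-enables the indices it uses.
         */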
4654         /* USTORM */
4655         section = ((u64)mapping) + offsetof(struct host_status_block,
4656                                             u_status_block);
4657         sb->u_status_block.status_block_id = sb_id;
4658
4659         REG_WR(bp, BAR_CSTRORM_INTMEM +
4660                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4661         REG_WR(bp, BAR_CSTRORM_INTMEM +
4662                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4663                U64_HI(section));
4664         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4665                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4666
4667         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4668                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4669                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4670
4671         /* CSTORM */
4672         section = ((u64)mapping) + offsetof(struct host_status_block,
4673                                             c_status_block);
4674         sb->c_status_block.status_block_id = sb_id;
4675
4676         REG_WR(bp, BAR_CSTRORM_INTMEM +
4677                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4678         REG_WR(bp, BAR_CSTRORM_INTMEM +
4679                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4680                U64_HI(section));
4681         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4682                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4683
4684         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4685                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4686                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4687
4688         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4689 }
4690
4691 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4692 {
4693         int func = BP_FUNC(bp);
4694
4695         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4696                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4697                         sizeof(struct tstorm_def_status_block)/4);
4698         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4699                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4700                         sizeof(struct cstorm_def_status_block_u)/4);
4701         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4703                         sizeof(struct cstorm_def_status_block_c)/4);
4704         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4705                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4706                         sizeof(struct xstorm_def_status_block)/4);
4707 }
4708
4709 static void bnx2x_init_def_sb(struct bnx2x *bp,
4710                               struct host_def_status_block *def_sb,
4711                               dma_addr_t mapping, int sb_id)
4712 {
4713         int port = BP_PORT(bp);
4714         int func = BP_FUNC(bp);
4715         int index, val, reg_offset;
4716         u64 section;
4717
4718         /* ATTN */
4719         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4720                                             atten_status_block);
4721         def_sb->atten_status_block.status_block_id = sb_id;
4722
4723         bp->attn_state = 0;
4724
4725         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4726                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4727
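        /* Each attention group owns four consecutive AEU enable
         * registers (hence the 0x10 stride and 0x4/0x8/0xc offsets);
         * cache their masks for the attention handler.
         */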
4728         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4729                 bp->attn_group[index].sig[0] = REG_RD(bp,
4730                                                      reg_offset + 0x10*index);
4731                 bp->attn_group[index].sig[1] = REG_RD(bp,
4732                                                reg_offset + 0x4 + 0x10*index);
4733                 bp->attn_group[index].sig[2] = REG_RD(bp,
4734                                                reg_offset + 0x8 + 0x10*index);
4735                 bp->attn_group[index].sig[3] = REG_RD(bp,
4736                                                reg_offset + 0xc + 0x10*index);
4737         }
4738
4739         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4740                              HC_REG_ATTN_MSG0_ADDR_L);
4741
4742         REG_WR(bp, reg_offset, U64_LO(section));
4743         REG_WR(bp, reg_offset + 4, U64_HI(section));
4744
4745         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4746
4747         val = REG_RD(bp, reg_offset);
4748         val |= sb_id;
4749         REG_WR(bp, reg_offset, val);
4750
4751         /* USTORM */
4752         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4753                                             u_def_status_block);
4754         def_sb->u_def_status_block.status_block_id = sb_id;
4755
4756         REG_WR(bp, BAR_CSTRORM_INTMEM +
4757                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4758         REG_WR(bp, BAR_CSTRORM_INTMEM +
4759                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4760                U64_HI(section));
4761         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4762                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4763
4764         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4765                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4766                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4767
4768         /* CSTORM */
4769         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4770                                             c_def_status_block);
4771         def_sb->c_def_status_block.status_block_id = sb_id;
4772
4773         REG_WR(bp, BAR_CSTRORM_INTMEM +
4774                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4775         REG_WR(bp, BAR_CSTRORM_INTMEM +
4776                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4777                U64_HI(section));
4778         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4779                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4780
4781         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4782                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4783                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4784
4785         /* TSTORM */
4786         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4787                                             t_def_status_block);
4788         def_sb->t_def_status_block.status_block_id = sb_id;
4789
4790         REG_WR(bp, BAR_TSTRORM_INTMEM +
4791                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4792         REG_WR(bp, BAR_TSTRORM_INTMEM +
4793                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4794                U64_HI(section));
4795         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4796                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4797
4798         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4799                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4800                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4801
4802         /* XSTORM */
4803         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4804                                             x_def_status_block);
4805         def_sb->x_def_status_block.status_block_id = sb_id;
4806
4807         REG_WR(bp, BAR_XSTRORM_INTMEM +
4808                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4809         REG_WR(bp, BAR_XSTRORM_INTMEM +
4810                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4811                U64_HI(section));
4812         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4813                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4814
4815         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4816                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4817                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4818
4819         bp->stats_pending = 0;
4820         bp->set_mac_pending = 0;
4821
4822         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4823 }
4824
4825 static void bnx2x_update_coalesce(struct bnx2x *bp)
4826 {
4827         int port = BP_PORT(bp);
4828         int i;
4829
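        /* The HC timeout registers appear to count in 12-usec units,
         * hence the /12 scaling; a resulting timeout of 0 disables host
         * coalescing for that index via the HC_DISABLE write.
         */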
4830         for_each_queue(bp, i) {
4831                 int sb_id = bp->fp[i].sb_id;
4832
4833                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4834                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4835                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4836                                                       U_SB_ETH_RX_CQ_INDEX),
4837                         bp->rx_ticks/12);
4838                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4839                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4840                                                        U_SB_ETH_RX_CQ_INDEX),
4841                          (bp->rx_ticks/12) ? 0 : 1);
4842
4843                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4844                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4845                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4846                                                       C_SB_ETH_TX_CQ_INDEX),
4847                         bp->tx_ticks/12);
4848                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4849                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4850                                                        C_SB_ETH_TX_CQ_INDEX),
4851                          (bp->tx_ticks/12) ? 0 : 1);
4852         }
4853 }
4854
4855 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4856                                        struct bnx2x_fastpath *fp, int last)
4857 {
4858         int i;
4859
4860         for (i = 0; i < last; i++) {
4861                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4862                 struct sk_buff *skb = rx_buf->skb;
4863
4864                 if (skb == NULL) {
4865                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4866                         continue;
4867                 }
4868
4869                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4870                         pci_unmap_single(bp->pdev,
4871                                          pci_unmap_addr(rx_buf, mapping),
4872                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4873
4874                 dev_kfree_skb(skb);
4875                 rx_buf->skb = NULL;
4876         }
4877 }
4878
4879 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4880 {
4881         int func = BP_FUNC(bp);
4882         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4883                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4884         u16 ring_prod, cqe_ring_prod;
4885         int i, j;
4886
4887         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4888         DP(NETIF_MSG_IFUP,
4889            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4890
4891         if (bp->flags & TPA_ENABLE_FLAG) {
4892
4893                 for_each_rx_queue(bp, j) {
4894                         struct bnx2x_fastpath *fp = &bp->fp[j];
4895
4896                         for (i = 0; i < max_agg_queues; i++) {
4897                                 fp->tpa_pool[i].skb =
4898                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4899                                 if (!fp->tpa_pool[i].skb) {
4900                                         BNX2X_ERR("Failed to allocate TPA "
4901                                                   "skb pool for queue[%d] - "
4902                                                   "disabling TPA on this "
4903                                                   "queue!\n", j);
4904                                         bnx2x_free_tpa_pool(bp, fp, i);
4905                                         fp->disable_tpa = 1;
4906                                         break;
4907                                 }
4908                                 pci_unmap_addr_set((struct sw_rx_bd *)
4909                                                         &fp->tpa_pool[i],
4910                                                    mapping, 0);
4911                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4912                         }
4913                 }
4914         }
4915
4916         for_each_rx_queue(bp, j) {
4917                 struct bnx2x_fastpath *fp = &bp->fp[j];
4918
4919                 fp->rx_bd_cons = 0;
4920                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4921                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4922
4923                 /* Mark queue as Rx */
4924                 fp->is_rx_queue = 1;
4925
4926                 /* "next page" elements initialization */
4927                 /* SGE ring */
4928                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4929                         struct eth_rx_sge *sge;
4930
4931                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4932                         sge->addr_hi =
4933                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4934                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4935                         sge->addr_lo =
4936                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4937                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4938                 }
4939
4940                 bnx2x_init_sge_ring_bit_mask(fp);
4941
4942                 /* RX BD ring */
4943                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4944                         struct eth_rx_bd *rx_bd;
4945
4946                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4947                         rx_bd->addr_hi =
4948                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4949                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4950                         rx_bd->addr_lo =
4951                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4952                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4953                 }
4954
4955                 /* CQ ring */
4956                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4957                         struct eth_rx_cqe_next_page *nextpg;
4958
4959                         nextpg = (struct eth_rx_cqe_next_page *)
4960                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4961                         nextpg->addr_hi =
4962                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4963                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4964                         nextpg->addr_lo =
4965                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4966                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4967                 }
4968
4969                 /* Allocate SGEs and initialize the ring elements */
4970                 for (i = 0, ring_prod = 0;
4971                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4972
4973                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4974                                 BNX2X_ERR("was only able to allocate "
4975                                           "%d rx sges\n", i);
4976                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4977                                 /* Cleanup already allocated elements */
4978                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4979                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4980                                 fp->disable_tpa = 1;
4981                                 ring_prod = 0;
4982                                 break;
4983                         }
4984                         ring_prod = NEXT_SGE_IDX(ring_prod);
4985                 }
4986                 fp->rx_sge_prod = ring_prod;
4987
4988                 /* Allocate BDs and initialize BD ring */
4989                 fp->rx_comp_cons = 0;
4990                 cqe_ring_prod = ring_prod = 0;
4991                 for (i = 0; i < bp->rx_ring_size; i++) {
4992                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4993                                 BNX2X_ERR("was only able to allocate "
4994                                           "%d rx skbs on queue[%d]\n", i, j);
4995                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4996                                 break;
4997                         }
4998                         ring_prod = NEXT_RX_IDX(ring_prod);
4999                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5000                         WARN_ON(ring_prod <= i);
5001                 }
5002
5003                 fp->rx_bd_prod = ring_prod;
5004                 /* must not have more available CQEs than BDs */
5005                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5006                                        cqe_ring_prod);
5007                 fp->rx_pkt = fp->rx_calls = 0;
5008
5009                 /* Warning!
5010                  * This will generate an interrupt (to the TSTORM);
5011                  * it must only be done after the chip is initialized.
5012                  */
5013                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5014                                      fp->rx_sge_prod);
5015                 if (j != 0)
5016                         continue;
5017
5018                 REG_WR(bp, BAR_USTRORM_INTMEM +
5019                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5020                        U64_LO(fp->rx_comp_mapping));
5021                 REG_WR(bp, BAR_USTRORM_INTMEM +
5022                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5023                        U64_HI(fp->rx_comp_mapping));
5024         }
5025 }
5026
5027 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5028 {
5029         int i, j;
5030
5031         for_each_tx_queue(bp, j) {
5032                 struct bnx2x_fastpath *fp = &bp->fp[j];
5033
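                /* Chain the Tx BD pages: the last BD of each page is a
                 * next_bd pointing at the following page (wrapping back
                 * to the first page after NUM_TX_RINGS).
                 */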
5034                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5035                         struct eth_tx_next_bd *tx_next_bd =
5036                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5037
5038                         tx_next_bd->addr_hi =
5039                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5040                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5041                         tx_next_bd->addr_lo =
5042                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5043                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5044                 }
5045
5046                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5047                 fp->tx_db.data.zero_fill1 = 0;
5048                 fp->tx_db.data.prod = 0;
5049
5050                 fp->tx_pkt_prod = 0;
5051                 fp->tx_pkt_cons = 0;
5052                 fp->tx_bd_prod = 0;
5053                 fp->tx_bd_cons = 0;
5054                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5055                 fp->tx_pkt = 0;
5056         }
5057
5058         /* clean tx statistics */
5059         for_each_rx_queue(bp, i)
5060                 bnx2x_fp(bp, i, tx_pkt) = 0;
5061 }
5062
5063 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5064 {
5065         int func = BP_FUNC(bp);
5066
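        /* Set up the slowpath (ramrod) queue: spq_left is the credit of
         * free SPQ entries, and XSTORM is told the queue's base address
         * and initial producer index.
         */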
5067         spin_lock_init(&bp->spq_lock);
5068
5069         bp->spq_left = MAX_SPQ_PENDING;
5070         bp->spq_prod_idx = 0;
5071         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5072         bp->spq_prod_bd = bp->spq;
5073         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5074
5075         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5076                U64_LO(bp->spq_mapping));
5077         REG_WR(bp,
5078                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5079                U64_HI(bp->spq_mapping));
5080
5081         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5082                bp->spq_prod_idx);
5083 }
5084
5085 static void bnx2x_init_context(struct bnx2x *bp)
5086 {
5087         int i;
5088
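        /* Fill the per-connection Ethernet context: USTORM gets the Rx
         * BD/SGE ring addresses and buffer sizes (plus TPA limits when
         * enabled), XSTORM gets the Tx ring, and both carry the client
         * ID used for per-client statistics.
         */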
5089         for_each_rx_queue(bp, i) {
5090                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5091                 struct bnx2x_fastpath *fp = &bp->fp[i];
5092                 u8 cl_id = fp->cl_id;
5093
5094                 context->ustorm_st_context.common.sb_index_numbers =
5095                                                 BNX2X_RX_SB_INDEX_NUM;
5096                 context->ustorm_st_context.common.clientId = cl_id;
5097                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5098                 context->ustorm_st_context.common.flags =
5099                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5100                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5101                 context->ustorm_st_context.common.statistics_counter_id =
5102                                                 cl_id;
5103                 context->ustorm_st_context.common.mc_alignment_log_size =
5104                                                 BNX2X_RX_ALIGN_SHIFT;
5105                 context->ustorm_st_context.common.bd_buff_size =
5106                                                 bp->rx_buf_size;
5107                 context->ustorm_st_context.common.bd_page_base_hi =
5108                                                 U64_HI(fp->rx_desc_mapping);
5109                 context->ustorm_st_context.common.bd_page_base_lo =
5110                                                 U64_LO(fp->rx_desc_mapping);
5111                 if (!fp->disable_tpa) {
5112                         context->ustorm_st_context.common.flags |=
5113                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5114                         context->ustorm_st_context.common.sge_buff_size =
5115                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5116                                          (u32)0xffff);
5117                         context->ustorm_st_context.common.sge_page_base_hi =
5118                                                 U64_HI(fp->rx_sge_mapping);
5119                         context->ustorm_st_context.common.sge_page_base_lo =
5120                                                 U64_LO(fp->rx_sge_mapping);
5121
5122                         context->ustorm_st_context.common.max_sges_for_packet =
5123                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5124                         context->ustorm_st_context.common.max_sges_for_packet =
5125                                 ((context->ustorm_st_context.common.
5126                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5127                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5128                 }
5129
5130                 context->ustorm_ag_context.cdu_usage =
5131                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5132                                                CDU_REGION_NUMBER_UCM_AG,
5133                                                ETH_CONNECTION_TYPE);
5134
5135                 context->xstorm_ag_context.cdu_reserved =
5136                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5137                                                CDU_REGION_NUMBER_XCM_AG,
5138                                                ETH_CONNECTION_TYPE);
5139         }
5140
5141         for_each_tx_queue(bp, i) {
5142                 struct bnx2x_fastpath *fp = &bp->fp[i];
5143                 struct eth_context *context =
5144                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5145
5146                 context->cstorm_st_context.sb_index_number =
5147                                                 C_SB_ETH_TX_CQ_INDEX;
5148                 context->cstorm_st_context.status_block_id = fp->sb_id;
5149
5150                 context->xstorm_st_context.tx_bd_page_base_hi =
5151                                                 U64_HI(fp->tx_desc_mapping);
5152                 context->xstorm_st_context.tx_bd_page_base_lo =
5153                                                 U64_LO(fp->tx_desc_mapping);
5154                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5155                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5156         }
5157 }
5158
5159 static void bnx2x_init_ind_table(struct bnx2x *bp)
5160 {
5161         int func = BP_FUNC(bp);
5162         int i;
5163
5164         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5165                 return;
5166
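        /* Spread the indirection table entries round-robin across the Rx
         * queues' client IDs.
         */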
5167         DP(NETIF_MSG_IFUP,
5168            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5169         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5170                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5171                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5172                         bp->fp->cl_id + (i % bp->num_rx_queues));
5173 }
5174
5175 static void bnx2x_set_client_config(struct bnx2x *bp)
5176 {
5177         struct tstorm_eth_client_config tstorm_client = {0};
5178         int port = BP_PORT(bp);
5179         int i;
5180
5181         tstorm_client.mtu = bp->dev->mtu;
5182         tstorm_client.config_flags =
5183                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5184                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5185 #ifdef BCM_VLAN
5186         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5187                 tstorm_client.config_flags |=
5188                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5189                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5190         }
5191 #endif
5192
5193         for_each_queue(bp, i) {
5194                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5195
5196                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5197                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5198                        ((u32 *)&tstorm_client)[0]);
5199                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5200                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5201                        ((u32 *)&tstorm_client)[1]);
5202         }
5203
5204         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5205            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5206 }
5207
5208 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5209 {
5210         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5211         int mode = bp->rx_mode;
5212         int mask = (1 << BP_L_ID(bp));
5213         int func = BP_FUNC(bp);
5214         int port = BP_PORT(bp);
5215         int i;
5216         /* All but management unicast packets should pass to the host as well */
5217         u32 llh_mask =
5218                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5219                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5220                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5221                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5222
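        /* mask carries this function's leading-client bit; each rx mode
         * below sets the matching accept/drop bits in the TSTORM MAC
         * filter configuration for that client.
         */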
5223         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5224
5225         switch (mode) {
5226         case BNX2X_RX_MODE_NONE: /* no Rx */
5227                 tstorm_mac_filter.ucast_drop_all = mask;
5228                 tstorm_mac_filter.mcast_drop_all = mask;
5229                 tstorm_mac_filter.bcast_drop_all = mask;
5230                 break;
5231
5232         case BNX2X_RX_MODE_NORMAL:
5233                 tstorm_mac_filter.bcast_accept_all = mask;
5234                 break;
5235
5236         case BNX2X_RX_MODE_ALLMULTI:
5237                 tstorm_mac_filter.mcast_accept_all = mask;
5238                 tstorm_mac_filter.bcast_accept_all = mask;
5239                 break;
5240
5241         case BNX2X_RX_MODE_PROMISC:
5242                 tstorm_mac_filter.ucast_accept_all = mask;
5243                 tstorm_mac_filter.mcast_accept_all = mask;
5244                 tstorm_mac_filter.bcast_accept_all = mask;
5245                 /* pass management unicast packets as well */
5246                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5247                 break;
5248
5249         default:
5250                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5251                 break;
5252         }
5253
5254         REG_WR(bp,
5255                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5256                llh_mask);
5257
5258         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5259                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5260                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5261                        ((u32 *)&tstorm_mac_filter)[i]);
5262
5263 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5264                    ((u32 *)&tstorm_mac_filter)[i]); */
5265         }
5266
5267         if (mode != BNX2X_RX_MODE_NONE)
5268                 bnx2x_set_client_config(bp);
5269 }
5270
5271 static void bnx2x_init_internal_common(struct bnx2x *bp)
5272 {
5273         int i;
5274
5275         /* Zero this manually as its initialization is
5276            currently missing in the initTool */
5277         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5278                 REG_WR(bp, BAR_USTRORM_INTMEM +
5279                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5280 }
5281
5282 static void bnx2x_init_internal_port(struct bnx2x *bp)
5283 {
5284         int port = BP_PORT(bp);
5285
5286         REG_WR(bp,
5287                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5288         REG_WR(bp,
5289                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5290         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5291         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5292 }
5293
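/* Per-function internal memory init: program the RSS/TPA configuration,
 * zero the per-client statistics counters in the XSTORM/TSTORM/USTORM
 * memories, and publish the DMA address of the host fw_stats buffer so
 * the storms know where to place the statistics they collect.
 */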
5294 static void bnx2x_init_internal_func(struct bnx2x *bp)
5295 {
5296         struct tstorm_eth_function_common_config tstorm_config = {0};
5297         struct stats_indication_flags stats_flags = {0};
5298         int port = BP_PORT(bp);
5299         int func = BP_FUNC(bp);
5300         int i, j;
5301         u32 offset;
5302         u16 max_agg_size;
5303
5304         if (is_multi(bp)) {
5305                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5306                 tstorm_config.rss_result_mask = MULTI_MASK;
5307         }
5308
5309         /* Enable TPA if needed */
5310         if (bp->flags & TPA_ENABLE_FLAG)
5311                 tstorm_config.config_flags |=
5312                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5313
5314         if (IS_E1HMF(bp))
5315                 tstorm_config.config_flags |=
5316                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5317
5318         tstorm_config.leading_client_id = BP_L_ID(bp);
5319
5320         REG_WR(bp, BAR_TSTRORM_INTMEM +
5321                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5322                (*(u32 *)&tstorm_config));
5323
5324         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5325         bnx2x_set_storm_rx_mode(bp);
5326
5327         for_each_queue(bp, i) {
5328                 u8 cl_id = bp->fp[i].cl_id;
5329
5330                 /* reset xstorm per client statistics */
5331                 offset = BAR_XSTRORM_INTMEM +
5332                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5333                 for (j = 0;
5334                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5335                         REG_WR(bp, offset + j*4, 0);
5336
5337                 /* reset tstorm per client statistics */
5338                 offset = BAR_TSTRORM_INTMEM +
5339                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5340                 for (j = 0;
5341                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5342                         REG_WR(bp, offset + j*4, 0);
5343
5344                 /* reset ustorm per client statistics */
5345                 offset = BAR_USTRORM_INTMEM +
5346                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5347                 for (j = 0;
5348                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5349                         REG_WR(bp, offset + j*4, 0);
5350         }
5351
5352         /* Init statistics related context */
5353         stats_flags.collect_eth = 1;
5354
5355         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5356                ((u32 *)&stats_flags)[0]);
5357         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5358                ((u32 *)&stats_flags)[1]);
5359
5360         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5361                ((u32 *)&stats_flags)[0]);
5362         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5363                ((u32 *)&stats_flags)[1]);
5364
5365         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5366                ((u32 *)&stats_flags)[0]);
5367         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5368                ((u32 *)&stats_flags)[1]);
5369
5370         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5371                ((u32 *)&stats_flags)[0]);
5372         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5373                ((u32 *)&stats_flags)[1]);
5374
5375         REG_WR(bp, BAR_XSTRORM_INTMEM +
5376                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5377                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5378         REG_WR(bp, BAR_XSTRORM_INTMEM +
5379                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5380                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5381
5382         REG_WR(bp, BAR_TSTRORM_INTMEM +
5383                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5384                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5385         REG_WR(bp, BAR_TSTRORM_INTMEM +
5386                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5387                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5388
5389         REG_WR(bp, BAR_USTRORM_INTMEM +
5390                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5391                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5392         REG_WR(bp, BAR_USTRORM_INTMEM +
5393                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5394                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5395
5396         if (CHIP_IS_E1H(bp)) {
5397                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5398                         IS_E1HMF(bp));
5399                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5400                         IS_E1HMF(bp));
5401                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5402                         IS_E1HMF(bp));
5403                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5404                         IS_E1HMF(bp));
5405
5406                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5407                          bp->e1hov);
5408         }
5409
        /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
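        /* e.g. assuming 4KB SGE pages and PAGES_PER_SGE == 2 (illustrative
           values), min(8, MAX_SKB_FRAGS) * 4096 * 2 = 64KB, which the u16
           cap clamps to 0xffff */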
5411         max_agg_size =
5412                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5413                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5414                     (u32)0xffff);
5415         for_each_rx_queue(bp, i) {
5416                 struct bnx2x_fastpath *fp = &bp->fp[i];
5417
5418                 REG_WR(bp, BAR_USTRORM_INTMEM +
5419                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5420                        U64_LO(fp->rx_comp_mapping));
5421                 REG_WR(bp, BAR_USTRORM_INTMEM +
5422                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5423                        U64_HI(fp->rx_comp_mapping));
5424
5425                 /* Next page */
5426                 REG_WR(bp, BAR_USTRORM_INTMEM +
5427                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5428                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5429                 REG_WR(bp, BAR_USTRORM_INTMEM +
5430                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5431                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5432
5433                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5434                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5435                          max_agg_size);
5436         }
5437
5438         /* dropless flow control */
5439         if (CHIP_IS_E1H(bp)) {
5440                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5441
5442                 rx_pause.bd_thr_low = 250;
5443                 rx_pause.cqe_thr_low = 250;
5444                 rx_pause.cos = 1;
5445                 rx_pause.sge_thr_low = 0;
5446                 rx_pause.bd_thr_high = 350;
5447                 rx_pause.cqe_thr_high = 350;
5448                 rx_pause.sge_thr_high = 0;
5449
5450                 for_each_rx_queue(bp, i) {
5451                         struct bnx2x_fastpath *fp = &bp->fp[i];
5452
5453                         if (!fp->disable_tpa) {
5454                                 rx_pause.sge_thr_low = 150;
5455                                 rx_pause.sge_thr_high = 250;
5456                         }
5457
5459                         offset = BAR_USTRORM_INTMEM +
5460                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5461                                                                    fp->cl_id);
5462                         for (j = 0;
5463                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5464                              j++)
5465                                 REG_WR(bp, offset + j*4,
5466                                        ((u32 *)&rx_pause)[j]);
5467                 }
5468         }
5469
5470         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5471
5472         /* Init rate shaping and fairness contexts */
5473         if (IS_E1HMF(bp)) {
5474                 int vn;
5475
5476                 /* During init there is no active link
5477                    Until link is up, set link rate to 10Gbps */
5478                 bp->link_vars.line_speed = SPEED_10000;
5479                 bnx2x_init_port_minmax(bp);
5480
5481                 bnx2x_calc_vn_weight_sum(bp);
5482
5483                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5484                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5485
5486                 /* Enable rate shaping and fairness */
5487                 bp->cmng.flags.cmng_enables =
5488                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5489                 if (bp->vn_weight_sum)
5490                         bp->cmng.flags.cmng_enables |=
5491                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5492                 else
5493                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5494                            "  fairness will be disabled\n");
5495         } else {
5496                 /* rate shaping and fairness are disabled */
5497                 DP(NETIF_MSG_IFUP,
5498                    "single function mode  minmax will be disabled\n");
5499         }
5500
5502         /* Store it to internal memory */
5503         if (bp->port.pmf)
5504                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5505                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5506                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5507                                ((u32 *)(&bp->cmng))[i]);
5508 }
5509
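/* The load codes form a strict hierarchy: COMMON init implies PORT init,
 * which in turn implies FUNCTION init, hence the deliberate fallthrough
 * between the cases below.
 */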
5510 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5511 {
5512         switch (load_code) {
5513         case FW_MSG_CODE_DRV_LOAD_COMMON:
5514                 bnx2x_init_internal_common(bp);
5515                 /* no break */
5516
5517         case FW_MSG_CODE_DRV_LOAD_PORT:
5518                 bnx2x_init_internal_port(bp);
5519                 /* no break */
5520
5521         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5522                 bnx2x_init_internal_func(bp);
5523                 break;
5524
5525         default:
5526                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5527                 break;
5528         }
5529 }
5530
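/* Top-level NIC init: set up the per-queue fastpath structures and status
 * blocks, init the default status block, the Rx/Tx/slowpath rings and the
 * internal memories, then enable interrupts and check for a pending SPIO5
 * (fan failure) attention.
 */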
5531 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5532 {
5533         int i;
5534
5535         for_each_queue(bp, i) {
5536                 struct bnx2x_fastpath *fp = &bp->fp[i];
5537
5538                 fp->bp = bp;
5539                 fp->state = BNX2X_FP_STATE_CLOSED;
5540                 fp->index = i;
5541                 fp->cl_id = BP_L_ID(bp) + i;
5542                 fp->sb_id = fp->cl_id;
                /* Tx queues beyond the Rx range reuse the client id of
                   the paired Rx queue, so matching Rx and Tx SBs are
                   served by the same client */
5544                 if (i >= bp->num_rx_queues)
5545                         fp->cl_id -= bp->num_rx_queues;
5546                 DP(NETIF_MSG_IFUP,
5547                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5548                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5549                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5550                               fp->sb_id);
5551                 bnx2x_update_fpsb_idx(fp);
5552         }
5553
5554         /* ensure status block indices were read */
5555         rmb();
5556
5558         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5559                           DEF_SB_ID);
5560         bnx2x_update_dsb_idx(bp);
5561         bnx2x_update_coalesce(bp);
5562         bnx2x_init_rx_rings(bp);
5563         bnx2x_init_tx_ring(bp);
5564         bnx2x_init_sp_ring(bp);
5565         bnx2x_init_context(bp);
5566         bnx2x_init_internal(bp, load_code);
5567         bnx2x_init_ind_table(bp);
5568         bnx2x_stats_init(bp);
5569
5570         /* At this point, we are ready for interrupts */
5571         atomic_set(&bp->intr_sem, 0);
5572
5573         /* flush all before enabling interrupts */
5574         mb();
5575         mmiowb();
5576
5577         bnx2x_int_enable(bp);
5578
5579         /* Check for SPIO5 */
5580         bnx2x_attn_int_deasserted0(bp,
5581                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5582                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5583 }
5584
5585 /* end of nic init */
5586
5587 /*
5588  * gzip service functions
5589  */
5590
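/* Allocate the resources needed to decompress the firmware: a DMA-coherent
 * output buffer and a zlib stream together with its inflate workspace.
 */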
5591 static int bnx2x_gunzip_init(struct bnx2x *bp)
5592 {
5593         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5594                                               &bp->gunzip_mapping);
        if (bp->gunzip_buf == NULL)
5596                 goto gunzip_nomem1;
5597
5598         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
        if (bp->strm == NULL)
5600                 goto gunzip_nomem2;
5601
5602         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5603                                       GFP_KERNEL);
5604         if (bp->strm->workspace == NULL)
5605                 goto gunzip_nomem3;
5606
5607         return 0;
5608
5609 gunzip_nomem3:
5610         kfree(bp->strm);
5611         bp->strm = NULL;
5612
5613 gunzip_nomem2:
5614         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5615                             bp->gunzip_mapping);
5616         bp->gunzip_buf = NULL;
5617
5618 gunzip_nomem1:
5619         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
               " decompression\n", bp->dev->name);
5621         return -ENOMEM;
5622 }
5623
5624 static void bnx2x_gunzip_end(struct bnx2x *bp)
5625 {
5626         kfree(bp->strm->workspace);
5627
5628         kfree(bp->strm);
5629         bp->strm = NULL;
5630
5631         if (bp->gunzip_buf) {
5632                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5633                                     bp->gunzip_mapping);
5634                 bp->gunzip_buf = NULL;
5635         }
5636 }
5637
5638 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5639 {
5640         int n, rc;
5641
5642         /* check gzip header */
5643         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5644                 BNX2X_ERR("Bad gzip header\n");
5645                 return -EINVAL;
5646         }
5647
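        /* Skip the fixed 10-byte gzip member header (RFC 1952); if the
         * FNAME flag is set, also skip the NUL-terminated original file
         * name that follows it.
         */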
5648         n = 10;
5649
5650 #define FNAME                           0x8
5651
5652         if (zbuf[3] & FNAME)
5653                 while ((zbuf[n++] != 0) && (n < len));
5654
5655         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5656         bp->strm->avail_in = len - n;
5657         bp->strm->next_out = bp->gunzip_buf;
5658         bp->strm->avail_out = FW_BUF_SIZE;
5659
5660         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5661         if (rc != Z_OK)
5662                 return rc;
5663
5664         rc = zlib_inflate(bp->strm, Z_FINISH);
5665         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5666                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5667                        bp->dev->name, bp->strm->msg);
5668
5669         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5670         if (bp->gunzip_outlen & 0x3)
5671                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5672                                     " gunzip_outlen (%d) not aligned\n",
5673                        bp->dev->name, bp->gunzip_outlen);
5674         bp->gunzip_outlen >>= 2;
5675
5676         zlib_inflateEnd(bp->strm);
5677
5678         if (rc == Z_STREAM_END)
5679                 return 0;
5680
5681         return rc;
5682 }
5683
5684 /* nic load/unload */
5685
5686 /*
5687  * General service functions
5688  */
5689
5690 /* send a NIG loopback debug packet */
5691 static void bnx2x_lb_pckt(struct bnx2x *bp)
5692 {
5693         u32 wb_write[3];
5694
5695         /* Ethernet source and destination addresses */
5696         wb_write[0] = 0x55555555;
5697         wb_write[1] = 0x55555555;
5698         wb_write[2] = 0x20;             /* SOP */
5699         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5700
5701         /* NON-IP protocol */
5702         wb_write[0] = 0x09000000;
5703         wb_write[1] = 0x55555555;
5704         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5705         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5706 }
5707
/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
5712 static int bnx2x_int_mem_test(struct bnx2x *bp)
5713 {
5714         int factor;
5715         int count, i;
5716         u32 val = 0;
5717
5718         if (CHIP_REV_IS_FPGA(bp))
5719                 factor = 120;
5720         else if (CHIP_REV_IS_EMUL(bp))
5721                 factor = 200;
5722         else
5723                 factor = 1;
5724
5725         DP(NETIF_MSG_HW, "start part1\n");
5726
5727         /* Disable inputs of parser neighbor blocks */
5728         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5729         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5730         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5731         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5732
5733         /*  Write 0 to parser credits for CFC search request */
5734         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5735
5736         /* send Ethernet packet */
5737         bnx2x_lb_pckt(bp);
5738
        /* TODO: do we need to reset the NIG statistics? */
5740         /* Wait until NIG register shows 1 packet of size 0x10 */
5741         count = 1000 * factor;
5742         while (count) {
5743
5744                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5745                 val = *bnx2x_sp(bp, wb_data[0]);
5746                 if (val == 0x10)
5747                         break;
5748
5749                 msleep(10);
5750                 count--;
5751         }
5752         if (val != 0x10) {
5753                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5754                 return -1;
5755         }
5756
5757         /* Wait until PRS register shows 1 packet */
5758         count = 1000 * factor;
5759         while (count) {
5760                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5761                 if (val == 1)
5762                         break;
5763
5764                 msleep(10);
5765                 count--;
5766         }
5767         if (val != 0x1) {
5768                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5769                 return -2;
5770         }
5771
5772         /* Reset and init BRB, PRS */
5773         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5774         msleep(50);
5775         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5776         msleep(50);
5777         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5778         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5779
5780         DP(NETIF_MSG_HW, "part2\n");
5781
5782         /* Disable inputs of parser neighbor blocks */
5783         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5784         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5785         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5786         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5787
5788         /* Write 0 to parser credits for CFC search request */
5789         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5790
5791         /* send 10 Ethernet packets */
5792         for (i = 0; i < 10; i++)
5793                 bnx2x_lb_pckt(bp);
5794
5795         /* Wait until NIG register shows 10 + 1
5796            packets of size 11*0x10 = 0xb0 */
5797         count = 1000 * factor;
5798         while (count) {
5799
5800                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5801                 val = *bnx2x_sp(bp, wb_data[0]);
5802                 if (val == 0xb0)
5803                         break;
5804
5805                 msleep(10);
5806                 count--;
5807         }
5808         if (val != 0xb0) {
5809                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5810                 return -3;
5811         }
5812
5813         /* Wait until PRS register shows 2 packets */
5814         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5815         if (val != 2)
5816                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5817
5818         /* Write 1 to parser credits for CFC search request */
5819         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5820
5821         /* Wait until PRS register shows 3 packets */
5822         msleep(10 * factor);
5824         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5825         if (val != 3)
5826                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5827
5828         /* clear NIG EOP FIFO */
5829         for (i = 0; i < 11; i++)
5830                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5831         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5832         if (val != 1) {
5833                 BNX2X_ERR("clear of NIG failed\n");
5834                 return -4;
5835         }
5836
5837         /* Reset and init BRB, PRS, NIG */
5838         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5839         msleep(50);
5840         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5841         msleep(50);
5842         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5843         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5844 #ifndef BCM_ISCSI
5845         /* set NIC mode */
5846         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5847 #endif
5848
5849         /* Enable inputs of parser neighbor blocks */
5850         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5851         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5852         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5853         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5854
5855         DP(NETIF_MSG_HW, "done\n");
5856
5857         return 0; /* OK */
5858 }
5859
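/* Unmask the attention interrupts of the various HW blocks (writing 0 to
 * an INT_MASK register leaves all of that block's attention sources
 * enabled); PXP2 and PBF keep a few bits masked.
 */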
5860 static void enable_blocks_attention(struct bnx2x *bp)
5861 {
5862         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5863         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5864         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5865         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5866         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5867         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5868         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5869         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5870         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5871 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5872 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5873         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5874         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5875         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5876 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5877 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5878         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5879         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5880         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5881         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5882 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5883 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5884         if (CHIP_REV_IS_FPGA(bp))
5885                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5886         else
5887                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5888         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5889         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5890         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5891 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5892 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5893         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5894         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5895 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
        REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5897 }
5898
5900 static void bnx2x_reset_common(struct bnx2x *bp)
5901 {
5902         /* reset_common */
5903         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5904                0xd3ffff7f);
5905         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5906 }
5908
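/* Decide from the shared HW configuration (and, if so configured, the
 * external PHY types) whether the board requires fan failure detection,
 * and if so route SPIO5 as an active-low interrupt to the IGU.
 */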
5909 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5910 {
5911         u32 val;
5912         u8 port;
5913         u8 is_required = 0;
5914
5915         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5916               SHARED_HW_CFG_FAN_FAILURE_MASK;
5917
5918         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5919                 is_required = 1;
5920
5921         /*
5922          * The fan failure mechanism is usually related to the PHY type since
5923          * the power consumption of the board is affected by the PHY. Currently,
5924          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5925          */
5926         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5927                 for (port = PORT_0; port < PORT_MAX; port++) {
5928                         u32 phy_type =
5929                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5930                                          external_phy_config) &
5931                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5932                         is_required |=
5933                                 ((phy_type ==
5934                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5935                                  (phy_type ==
5936                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5937                                  (phy_type ==
5938                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5939                 }
5940
5941         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5942
5943         if (is_required == 0)
5944                 return;
5945
5946         /* Fan failure is indicated by SPIO 5 */
5947         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5948                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5949
5950         /* set to active low mode */
5951         val = REG_RD(bp, MISC_REG_SPIO_INT);
5952         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5953                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5954         REG_WR(bp, MISC_REG_SPIO_INT, val);
5955
5956         /* enable interrupt to signal the IGU */
5957         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5958         val |= (1 << MISC_REGISTERS_SPIO_5);
5959         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5960 }
5961
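/* One-time init of the HW blocks shared by both ports: performed only by
 * the first driver to load on the chip, as indicated by the
 * FW_MSG_CODE_DRV_LOAD_COMMON response from the MCP.
 */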
5962 static int bnx2x_init_common(struct bnx2x *bp)
5963 {
5964         u32 val, i;
5965
5966         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5967
5968         bnx2x_reset_common(bp);
5969         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5970         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5971
5972         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5973         if (CHIP_IS_E1H(bp))
5974                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5975
5976         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5977         msleep(30);
5978         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5979
5980         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5981         if (CHIP_IS_E1(bp)) {
5982                 /* enable HW interrupt from PXP on USDM overflow
5983                    bit 16 on INT_MASK_0 */
5984                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5985         }
5986
5987         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5988         bnx2x_init_pxp(bp);
5989
5990 #ifdef __BIG_ENDIAN
5991         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5992         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5993         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5994         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5995         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5996         /* make sure this value is 0 */
5997         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5998
5999 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6000         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6001         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6002         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6003         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6004 #endif
6005
6006         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6007 #ifdef BCM_ISCSI
6008         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6009         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6010         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6011 #endif
6012
6013         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6014                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6015
        /* let the HW do its magic ... */
6017         msleep(100);
6018         /* finish PXP init */
6019         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6020         if (val != 1) {
6021                 BNX2X_ERR("PXP2 CFG failed\n");
6022                 return -EBUSY;
6023         }
6024         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6025         if (val != 1) {
6026                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6027                 return -EBUSY;
6028         }
6029
6030         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6031         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6032
6033         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6034
6035         /* clean the DMAE memory */
6036         bp->dmae_ready = 1;
6037         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6038
6039         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6040         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6041         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6042         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6043
6044         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6045         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6046         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6047         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6048
6049         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6050         /* soft reset pulse */
6051         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6052         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6053
6054 #ifdef BCM_ISCSI
6055         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6056 #endif
6057
6058         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6059         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6060         if (!CHIP_REV_IS_SLOW(bp)) {
6061                 /* enable hw interrupt from doorbell Q */
6062                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6063         }
6064
6065         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6066         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6067         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6068         /* set NIC mode */
6069         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6070         if (CHIP_IS_E1H(bp))
6071                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6072
6073         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6074         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6075         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6076         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6077
6078         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6079         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6080         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6081         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6082
6083         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6084         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6085         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6086         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6087
6088         /* sync semi rtc */
6089         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6090                0x80000000);
6091         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6092                0x80000000);
6093
6094         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6095         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6096         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6097
6098         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6099         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6100                 REG_WR(bp, i, 0xc0cac01a);
6101                 /* TODO: replace with something meaningful */
6102         }
6103         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6104         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6105
6106         if (sizeof(union cdu_context) != 1024)
6107                 /* we currently assume that a context is 1024 bytes */
6108                 printk(KERN_ALERT PFX "please adjust the size of"
6109                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6110
6111         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6112         val = (4 << 24) + (0 << 12) + 1024;
6113         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6114
6115         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6116         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6117         /* enable context validation interrupt from CFC */
6118         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6119
6120         /* set the thresholds to prevent CFC/CDU race */
6121         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6122
6123         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6124         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6125
6126         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6127         /* Reset PCIE errors for debug */
6128         REG_WR(bp, 0x2814, 0xffffffff);
6129         REG_WR(bp, 0x3820, 0xffffffff);
6130
6131         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6132         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6133         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6134         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6135
6136         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6137         if (CHIP_IS_E1H(bp)) {
6138                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6139                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6140         }
6141
6142         if (CHIP_REV_IS_SLOW(bp))
6143                 msleep(200);
6144
6145         /* finish CFC init */
6146         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6147         if (val != 1) {
6148                 BNX2X_ERR("CFC LL_INIT failed\n");
6149                 return -EBUSY;
6150         }
6151         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6152         if (val != 1) {
6153                 BNX2X_ERR("CFC AC_INIT failed\n");
6154                 return -EBUSY;
6155         }
6156         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6157         if (val != 1) {
6158                 BNX2X_ERR("CFC CAM_INIT failed\n");
6159                 return -EBUSY;
6160         }
6161         REG_WR(bp, CFC_REG_DEBUG0, 0);
6162
        /* read the NIG statistic to see if this is our first load
           since power-up */
6165         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6166         val = *bnx2x_sp(bp, wb_data[0]);
6167
6168         /* do internal memory self test */
6169         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6170                 BNX2X_ERR("internal mem self test failed\n");
6171                 return -EBUSY;
6172         }
6173
6174         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6175         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6176         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6177         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6178         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6179                 bp->port.need_hw_lock = 1;
6180                 break;
6181
6182         default:
6183                 break;
6184         }
6185
6186         bnx2x_setup_fan_failure_detection(bp);
6187
6188         /* clear PXP2 attentions */
6189         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6190
6191         enable_blocks_attention(bp);
6192
6193         if (!BP_NOMCP(bp)) {
6194                 bnx2x_acquire_phy_lock(bp);
6195                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6196                 bnx2x_release_phy_lock(bp);
6197         } else
6198                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6199
6200         return 0;
6201 }
6202
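/* Per-port HW init: run the port stage of the init tool for each block,
 * set the BRB pause thresholds according to the MTU and number of ports,
 * configure PBF, program the AEU attention masks and hook the external
 * PHY attentions into the AEU groups.
 */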
6203 static int bnx2x_init_port(struct bnx2x *bp)
6204 {
6205         int port = BP_PORT(bp);
6206         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6207         u32 low, high;
6208         u32 val;
6209
6210         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6211
6212         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6213
6214         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6215         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6216
6217         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6218         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6219         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6220 #ifdef BCM_ISCSI
6221         /* Port0  1
6222          * Port1  385 */
6223         i++;
6224         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6225         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6226         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6227         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6228
6229         /* Port0  2
6230          * Port1  386 */
6231         i++;
6232         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6233         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6234         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6235         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6236
6237         /* Port0  3
6238          * Port1  387 */
6239         i++;
6240         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6241         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6242         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6243         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6244 #endif
6245         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6246
6247 #ifdef BCM_ISCSI
6248         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6249         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6250
6251         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6252 #endif
6253         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6254
6255         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6256         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6257                 /* no pause for emulation and FPGA */
6258                 low = 0;
6259                 high = 513;
6260         } else {
6261                 if (IS_E1HMF(bp))
6262                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6263                 else if (bp->dev->mtu > 4096) {
6264                         if (bp->flags & ONE_PORT_FLAG)
6265                                 low = 160;
6266                         else {
6267                                 val = bp->dev->mtu;
6268                                 /* (24*1024 + val*4)/256 */
6269                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6270                         }
6271                 } else
6272                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6273                 high = low + 56;        /* 14*1024/256 */
6274         }
6275         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6276         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6277
6279         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6280
6281         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6282         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6283         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6284         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6285
6286         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6287         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6288         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6289         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6290
6291         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6292         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6293
6294         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6295
6296         /* configure PBF to work without PAUSE mtu 9000 */
6297         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6298
6299         /* update threshold */
6300         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6301         /* update init credit */
6302         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6303
6304         /* probe changes */
6305         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6306         msleep(5);
6307         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6308
6309 #ifdef BCM_ISCSI
6310         /* tell the searcher where the T2 table is */
6311         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6312
6313         wb_write[0] = U64_LO(bp->t2_mapping);
6314         wb_write[1] = U64_HI(bp->t2_mapping);
6315         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6316         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6317         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6318         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6319
6320         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6321 #endif
6322         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6323         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6324
6325         if (CHIP_IS_E1(bp)) {
6326                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6327                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6328         }
6329         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6330
6331         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6332         /* init aeu_mask_attn_func_0/1:
6333          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6334          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6335          *             bits 4-7 are used for "per vn group attention" */
6336         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6337                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6338
6339         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6340         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6341         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6342         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6343         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6344
6345         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6346
6347         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6348
6349         if (CHIP_IS_E1H(bp)) {
6350                 /* 0x2 disable e1hov, 0x1 enable */
6351                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6352                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6353
6354                 /* support pause requests from USDM, TSDM and BRB */
6355                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6356
                REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
                REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
                REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6362         }
6363
6364         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6365         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6366
6367         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6368         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6369                 {
6370                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6371
6372                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6373                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6374
6375                 /* The GPIO should be swapped if the swap register is
6376                    set and active */
6377                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6378                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6379
6380                 /* Select function upon port-swap configuration */
6381                 if (port == 0) {
6382                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6383                         aeu_gpio_mask = (swap_val && swap_override) ?
6384                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6385                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6386                 } else {
6387                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6388                         aeu_gpio_mask = (swap_val && swap_override) ?
6389                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6390                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6391                 }
6392                 val = REG_RD(bp, offset);
6393                 /* add GPIO3 to group */
6394                 val |= aeu_gpio_mask;
6395                 REG_WR(bp, offset, val);
6396                 }
6397                 break;
6398
6399         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6400         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6401                 /* add SPIO 5 to group 0 */
6402                 {
6403                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6404                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6405                 val = REG_RD(bp, reg_addr);
6406                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6407                 REG_WR(bp, reg_addr, val);
6408                 }
6409                 break;
6410
6411         default:
6412                 break;
6413         }
6414
6415         bnx2x__link_reset(bp);
6416
6417         return 0;
6418 }
6419
6420 #define ILT_PER_FUNC            (768/2)
6421 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
/* The physical address is shifted right 12 bits and a valid bit is
   set at the 53rd bit; then, since this is a wide register(TM),
   we split it into two 32-bit writes.
 */
6427 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6428 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
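/* e.g. for a (hypothetical) mapping of 0x1234567000:
 *   ONCHIP_ADDR1 -> 0x01234567  (address bits 12..43)
 *   ONCHIP_ADDR2 -> 0x00100000  (valid bit | address bits 44..63, here 0)
 */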
6429 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6430 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6431
6432 #define CNIC_ILT_LINES          0
6433
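/* Write one ILT entry for the given index: the page address with its
 * valid bit, split into two 32-bit writes via the ONCHIP_ADDR macros
 * above.
 */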
6434 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6435 {
6436         int reg;
6437
6438         if (CHIP_IS_E1H(bp))
6439                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6440         else /* E1 */
6441                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6442
6443         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6444 }
6445
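/* Per-function HW init: enable the MSI reconfigure capability, program
 * this function's ILT range for the CDU context, run the function stage
 * of the relevant blocks and clear latched PCIE error status for debug.
 */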
6446 static int bnx2x_init_func(struct bnx2x *bp)
6447 {
6448         int port = BP_PORT(bp);
6449         int func = BP_FUNC(bp);
6450         u32 addr, val;
6451         int i;
6452
6453         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6454
6455         /* set MSI reconfigure capability */
6456         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6457         val = REG_RD(bp, addr);
6458         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6459         REG_WR(bp, addr, val);
6460
6461         i = FUNC_ILT_BASE(func);
6462
6463         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6464         if (CHIP_IS_E1H(bp)) {
6465                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6466                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6467         } else /* E1 */
6468                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6469                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6470
6472         if (CHIP_IS_E1H(bp)) {
6473                 for (i = 0; i < 9; i++)
6474                         bnx2x_init_block(bp,
6475                                          cm_blocks[i], FUNC0_STAGE + func);
6476
6477                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6478                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6479         }
6480
6481         /* HC init per function */
6482         if (CHIP_IS_E1H(bp)) {
6483                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6484
6485                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6486                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6487         }
6488         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6489
6490         /* Reset PCIE errors for debug */
6491         REG_WR(bp, 0x2114, 0xffffffff);
6492         REG_WR(bp, 0x2120, 0xffffffff);
6493
6494         return 0;
6495 }
6496
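/* Main HW init entry point; as in bnx2x_init_internal(), the load code
 * cascades: COMMON falls through to PORT, which falls through to
 * FUNCTION. Also latches the current driver pulse sequence from the MCP
 * and zeroes the status blocks before releasing the gunzip resources.
 */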
6497 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6498 {
6499         int i, rc = 0;
6500
6501         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6502            BP_FUNC(bp), load_code);
6503
6504         bp->dmae_ready = 0;
6505         mutex_init(&bp->dmae_mutex);
6506         bnx2x_gunzip_init(bp);
6507
6508         switch (load_code) {
6509         case FW_MSG_CODE_DRV_LOAD_COMMON:
6510                 rc = bnx2x_init_common(bp);
6511                 if (rc)
6512                         goto init_hw_err;
6513                 /* no break */
6514
6515         case FW_MSG_CODE_DRV_LOAD_PORT:
6516                 bp->dmae_ready = 1;
6517                 rc = bnx2x_init_port(bp);
6518                 if (rc)
6519                         goto init_hw_err;
6520                 /* no break */
6521
6522         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6523                 bp->dmae_ready = 1;
6524                 rc = bnx2x_init_func(bp);
6525                 if (rc)
6526                         goto init_hw_err;
6527                 break;
6528
6529         default:
6530                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6531                 break;
6532         }
6533
6534         if (!BP_NOMCP(bp)) {
6535                 int func = BP_FUNC(bp);
6536
6537                 bp->fw_drv_pulse_wr_seq =
6538                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6539                                  DRV_PULSE_SEQ_MASK);
6540                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6541         }
6542
6543         /* this needs to be done before gunzip end */
6544         bnx2x_zero_def_sb(bp);
6545         for_each_queue(bp, i)
6546                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6547
6548 init_hw_err:
6549         bnx2x_gunzip_end(bp);
6550
6551         return rc;
6552 }
6553
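/* Release everything bnx2x_alloc_mem() allocated: the per-queue status
 * blocks and rings, the default status block, the slowpath buffers and
 * the slowpath (SPQ) ring. Safe to call on a partially completed
 * allocation, since each helper checks for NULL first.
 */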
6554 static void bnx2x_free_mem(struct bnx2x *bp)
6555 {
6556
6557 #define BNX2X_PCI_FREE(x, y, size) \
6558         do { \
6559                 if (x) { \
6560                         pci_free_consistent(bp->pdev, size, x, y); \
6561                         x = NULL; \
6562                         y = 0; \
6563                 } \
6564         } while (0)
6565
6566 #define BNX2X_FREE(x) \
6567         do { \
6568                 if (x) { \
6569                         vfree(x); \
6570                         x = NULL; \
6571                 } \
6572         } while (0)
6573
6574         int i;
6575
6576         /* fastpath */
6577         /* Common */
6578         for_each_queue(bp, i) {
6579
6580                 /* status blocks */
6581                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6582                                bnx2x_fp(bp, i, status_blk_mapping),
6583                                sizeof(struct host_status_block));
6584         }
6585         /* Rx */
6586         for_each_rx_queue(bp, i) {
6587
6588                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6589                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6590                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6591                                bnx2x_fp(bp, i, rx_desc_mapping),
6592                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6593
6594                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6595                                bnx2x_fp(bp, i, rx_comp_mapping),
6596                                sizeof(struct eth_fast_path_rx_cqe) *
6597                                NUM_RCQ_BD);
6598
6599                 /* SGE ring */
6600                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6601                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6602                                bnx2x_fp(bp, i, rx_sge_mapping),
6603                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6604         }
6605         /* Tx */
6606         for_each_tx_queue(bp, i) {
6607
6608                 /* fastpath tx rings: tx_buf tx_desc */
6609                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6610                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6611                                bnx2x_fp(bp, i, tx_desc_mapping),
6612                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6613         }
6614         /* end of fastpath */
6615
6616         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6617                        sizeof(struct host_def_status_block));
6618
6619         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6620                        sizeof(struct bnx2x_slowpath));
6621
6622 #ifdef BCM_ISCSI
6623         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6624         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6625         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6626         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6627 #endif
6628         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6629
6630 #undef BNX2X_PCI_FREE
#undef BNX2X_FREE
6632 }
6633
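/* Allocate all the host memory the driver needs: DMA-coherent status
 * blocks and descriptor rings plus vmalloc'ed shadow rings for each
 * queue, the default status block, the slowpath storage and the SPQ.
 * On any failure everything already allocated is freed and -ENOMEM is
 * returned.
 */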
6634 static int bnx2x_alloc_mem(struct bnx2x *bp)
6635 {
6636
6637 #define BNX2X_PCI_ALLOC(x, y, size) \
6638         do { \
6639                 x = pci_alloc_consistent(bp->pdev, size, y); \
6640                 if (x == NULL) \
6641                         goto alloc_mem_err; \
6642                 memset(x, 0, size); \
6643         } while (0)
6644
6645 #define BNX2X_ALLOC(x, size) \
6646         do { \
6647                 x = vmalloc(size); \
6648                 if (x == NULL) \
6649                         goto alloc_mem_err; \
6650                 memset(x, 0, size); \
6651         } while (0)
6652
6653         int i;
6654
6655         /* fastpath */
6656         /* Common */
6657         for_each_queue(bp, i) {
6658                 bnx2x_fp(bp, i, bp) = bp;
6659
6660                 /* status blocks */
6661                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6662                                 &bnx2x_fp(bp, i, status_blk_mapping),
6663                                 sizeof(struct host_status_block));
6664         }
6665         /* Rx */
6666         for_each_rx_queue(bp, i) {
6667
6668                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6669                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6670                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6671                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6672                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6673                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6674
6675                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6676                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6677                                 sizeof(struct eth_fast_path_rx_cqe) *
6678                                 NUM_RCQ_BD);
6679
6680                 /* SGE ring */
6681                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6682                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6683                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6684                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6685                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6686         }
6687         /* Tx */
6688         for_each_tx_queue(bp, i) {
6689
6690                 /* fastpath tx rings: tx_buf tx_desc */
6691                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6692                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6693                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6694                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6695                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6696         }
6697         /* end of fastpath */
6698
6699         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6700                         sizeof(struct host_def_status_block));
6701
6702         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6703                         sizeof(struct bnx2x_slowpath));
6704
6705 #ifdef BCM_ISCSI
6706         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6707
6708         /* Initialize T1 */
6709         for (i = 0; i < 64*1024; i += 64) {
6710                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6711                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6712         }
6713
6714         /* allocate searcher T2 table
6715            we allocate 1/4 of alloc num for T2
6716            (which is not entered into the ILT) */
6717         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6718
6719         /* Initialize T2 */
6720         for (i = 0; i < 16*1024; i += 64)
6721                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6722
6723         /* now fixup the last line in the block to point to the next block */
6724         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6725
6726         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6727         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6728
6729         /* QM queues (128*MAX_CONN) */
6730         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6731 #endif
6732
6733         /* Slow path ring */
6734         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6735
6736         return 0;
6737
6738 alloc_mem_err:
6739         bnx2x_free_mem(bp);
6740         return -ENOMEM;
6741
6742 #undef BNX2X_PCI_ALLOC
6743 #undef BNX2X_ALLOC
6744 }
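/*
 * On any allocation failure the macros above jump to alloc_mem_err,
 * which simply calls bnx2x_free_mem().  This relies on bnx2x_free_mem()
 * coping with a partially allocated state (skipping entries that were
 * never allocated), so no per-allocation unwind bookkeeping is needed.
 */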
6745
6746 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6747 {
6748         int i;
6749
6750         for_each_tx_queue(bp, i) {
6751                 struct bnx2x_fastpath *fp = &bp->fp[i];
6752
6753                 u16 bd_cons = fp->tx_bd_cons;
6754                 u16 sw_prod = fp->tx_pkt_prod;
6755                 u16 sw_cons = fp->tx_pkt_cons;
6756
6757                 while (sw_cons != sw_prod) {
6758                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6759                         sw_cons++;
6760                 }
6761         }
6762 }
6763
6764 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6765 {
6766         int i, j;
6767
6768         for_each_rx_queue(bp, j) {
6769                 struct bnx2x_fastpath *fp = &bp->fp[j];
6770
6771                 for (i = 0; i < NUM_RX_BD; i++) {
6772                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6773                         struct sk_buff *skb = rx_buf->skb;
6774
6775                         if (skb == NULL)
6776                                 continue;
6777
6778                         pci_unmap_single(bp->pdev,
6779                                          pci_unmap_addr(rx_buf, mapping),
6780                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6781
6782                         rx_buf->skb = NULL;
6783                         dev_kfree_skb(skb);
6784                 }
6785                 if (!fp->disable_tpa)
6786                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6787                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6788                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6789         }
6790 }
6791
6792 static void bnx2x_free_skbs(struct bnx2x *bp)
6793 {
6794         bnx2x_free_tx_skbs(bp);
6795         bnx2x_free_rx_skbs(bp);
6796 }
6797
6798 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6799 {
6800         int i, offset = 1;
6801
6802         free_irq(bp->msix_table[0].vector, bp->dev);
6803         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6804            bp->msix_table[0].vector);
6805
6806         for_each_queue(bp, i) {
6807                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6808                    "state %x\n", i, bp->msix_table[i + offset].vector,
6809                    bnx2x_fp(bp, i, state));
6810
6811                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6812         }
6813 }
6814
6815 static void bnx2x_free_irq(struct bnx2x *bp)
6816 {
6817         if (bp->flags & USING_MSIX_FLAG) {
6818                 bnx2x_free_msix_irqs(bp);
6819                 pci_disable_msix(bp->pdev);
6820                 bp->flags &= ~USING_MSIX_FLAG;
6821
6822         } else if (bp->flags & USING_MSI_FLAG) {
6823                 free_irq(bp->pdev->irq, bp->dev);
6824                 pci_disable_msi(bp->pdev);
6825                 bp->flags &= ~USING_MSI_FLAG;
6826
6827         } else
6828                 free_irq(bp->pdev->irq, bp->dev);
6829 }
6830
6831 static int bnx2x_enable_msix(struct bnx2x *bp)
6832 {
6833         int i, rc, offset = 1;
6834         int igu_vec = 0;
6835
6836         bp->msix_table[0].entry = igu_vec;
6837         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6838
6839         for_each_queue(bp, i) {
6840                 igu_vec = BP_L_ID(bp) + offset + i;
6841                 bp->msix_table[i + offset].entry = igu_vec;
6842                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6843                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6844         }
6845
6846         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6847                              BNX2X_NUM_QUEUES(bp) + offset);
6848         if (rc) {
6849                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6850                 return rc;
6851         }
6852
6853         bp->flags |= USING_MSIX_FLAG;
6854
6855         return 0;
6856 }
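/*
 * Illustration of the resulting MSI-X table layout: entry 0 always
 * carries the slowpath (default status block) interrupt, and entries
 * 1..n map the fastpath queues, fastpath #i using IGU vector
 * BP_L_ID(bp) + 1 + i.
 */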
6857
6858 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6859 {
6860         int i, rc, offset = 1;
6861
6862         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6863                          bp->dev->name, bp->dev);
6864         if (rc) {
6865                 BNX2X_ERR("request sp irq failed\n");
6866                 return -EBUSY;
6867         }
6868
6869         for_each_queue(bp, i) {
6870                 struct bnx2x_fastpath *fp = &bp->fp[i];
6871
6872                 if (i < bp->num_rx_queues)
6873                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6874                 else
6875                         sprintf(fp->name, "%s-tx-%d",
6876                                 bp->dev->name, i - bp->num_rx_queues);
6877
6878                 rc = request_irq(bp->msix_table[i + offset].vector,
6879                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6880                 if (rc) {
6881                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6882                         bnx2x_free_msix_irqs(bp);
6883                         return -EBUSY;
6884                 }
6885
6886                 fp->state = BNX2X_FP_STATE_IRQ;
6887         }
6888
6889         i = BNX2X_NUM_QUEUES(bp);
6890         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
6891                " ... fp[%d] %d\n",
6892                bp->dev->name, bp->msix_table[0].vector,
6893                0, bp->msix_table[offset].vector,
6894                i - 1, bp->msix_table[offset + i - 1].vector);
6895
6896         return 0;
6897 }
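/*
 * The names registered above show up in /proc/interrupts; e.g. for a
 * device named eth0 with two Rx and two Tx queues one would expect
 * "eth0-rx-0", "eth0-rx-1", "eth0-tx-0" and "eth0-tx-1" alongside the
 * slowpath vector registered under the bare device name.
 */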
6898
6899 static int bnx2x_enable_msi(struct bnx2x *bp)
6900 {
6901         int rc;
6902
6903         rc = pci_enable_msi(bp->pdev);
6904         if (rc) {
6905                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6906                 return -1;
6907         }
6908         bp->flags |= USING_MSI_FLAG;
6909
6910         return 0;
6911 }
6912
6913 static int bnx2x_req_irq(struct bnx2x *bp)
6914 {
6915         unsigned long flags;
6916         int rc;
6917
6918         if (bp->flags & USING_MSI_FLAG)
6919                 flags = 0;
6920         else
6921                 flags = IRQF_SHARED;
6922
6923         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6924                          bp->dev->name, bp->dev);
6925         if (!rc)
6926                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6927
6928         return rc;
6929 }
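/*
 * MSI interrupts are never shared, so no IRQF_SHARED is needed in that
 * case; a legacy INTx line, on the other hand, may be shared with other
 * devices, hence the flag.
 */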
6930
6931 static void bnx2x_napi_enable(struct bnx2x *bp)
6932 {
6933         int i;
6934
6935         for_each_rx_queue(bp, i)
6936                 napi_enable(&bnx2x_fp(bp, i, napi));
6937 }
6938
6939 static void bnx2x_napi_disable(struct bnx2x *bp)
6940 {
6941         int i;
6942
6943         for_each_rx_queue(bp, i)
6944                 napi_disable(&bnx2x_fp(bp, i, napi));
6945 }
6946
6947 static void bnx2x_netif_start(struct bnx2x *bp)
6948 {
6949         int intr_sem;
6950
6951         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6952         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6953
6954         if (intr_sem) {
6955                 if (netif_running(bp->dev)) {
6956                         bnx2x_napi_enable(bp);
6957                         bnx2x_int_enable(bp);
6958                         if (bp->state == BNX2X_STATE_OPEN)
6959                                 netif_tx_wake_all_queues(bp->dev);
6960                 }
6961         }
6962 }
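/*
 * bp->intr_sem acts as a nesting count of interrupt-disable requests;
 * atomic_dec_and_test() returns true only for the caller that drops the
 * count to zero, so the NAPI/interrupt path is re-armed exactly once,
 * by the last releaser.
 */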
6963
6964 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6965 {
6966         bnx2x_int_disable_sync(bp, disable_hw);
6967         bnx2x_napi_disable(bp);
6968         netif_tx_disable(bp->dev);
6969         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6970 }
6971
6972 /*
6973  * Init service functions
6974  */
6975
6976 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6977 {
6978         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6979         int port = BP_PORT(bp);
6980
6981         /* CAM allocation
6982          * unicasts 0-31:port0 32-63:port1
6983          * multicast 64-127:port0 128-191:port1
6984          */
6985         config->hdr.length = 2;
6986         config->hdr.offset = port ? 32 : 0;
6987         config->hdr.client_id = bp->fp->cl_id;
6988         config->hdr.reserved1 = 0;
6989
6990         /* primary MAC */
6991         config->config_table[0].cam_entry.msb_mac_addr =
6992                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6993         config->config_table[0].cam_entry.middle_mac_addr =
6994                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6995         config->config_table[0].cam_entry.lsb_mac_addr =
6996                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6997         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6998         if (set)
6999                 config->config_table[0].target_table_entry.flags = 0;
7000         else
7001                 CAM_INVALIDATE(config->config_table[0]);
7002         config->config_table[0].target_table_entry.clients_bit_vector =
7003                                                 cpu_to_le32(1 << BP_L_ID(bp));
7004         config->config_table[0].target_table_entry.vlan_id = 0;
7005
7006         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7007            (set ? "setting" : "clearing"),
7008            config->config_table[0].cam_entry.msb_mac_addr,
7009            config->config_table[0].cam_entry.middle_mac_addr,
7010            config->config_table[0].cam_entry.lsb_mac_addr);
7011
7012         /* broadcast */
7013         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7014         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7015         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7016         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7017         if (set)
7018                 config->config_table[1].target_table_entry.flags =
7019                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7020         else
7021                 CAM_INVALIDATE(config->config_table[1]);
7022         config->config_table[1].target_table_entry.clients_bit_vector =
7023                                                 cpu_to_le32(1 << BP_L_ID(bp));
7024         config->config_table[1].target_table_entry.vlan_id = 0;
7025
7026         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7027                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7028                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7029 }
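/*
 * Illustration of the CAM packing above (assuming a little-endian host):
 * for dev_addr 00:11:22:33:44:55, *(u16 *)&dev_addr[0] reads 0x1100 and
 * swab16() turns it into 0x0011, so the CAM entry ends up holding
 * msb 0x0011, middle 0x2233, lsb 0x4455, i.e. the MAC as big-endian
 * 16-bit words.
 */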
7030
7031 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7032 {
7033         struct mac_configuration_cmd_e1h *config =
7034                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7035
7036         /* CAM allocation for E1H
7037          * unicasts: by func number
7038          * multicast: 20+FUNC*20, 20 each
7039          */
7040         config->hdr.length = 1;
7041         config->hdr.offset = BP_FUNC(bp);
7042         config->hdr.client_id = bp->fp->cl_id;
7043         config->hdr.reserved1 = 0;
7044
7045         /* primary MAC */
7046         config->config_table[0].msb_mac_addr =
7047                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7048         config->config_table[0].middle_mac_addr =
7049                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7050         config->config_table[0].lsb_mac_addr =
7051                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
7052         config->config_table[0].clients_bit_vector =
7053                                         cpu_to_le32(1 << BP_L_ID(bp));
7054         config->config_table[0].vlan_id = 0;
7055         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7056         if (set)
7057                 config->config_table[0].flags = BP_PORT(bp);
7058         else
7059                 config->config_table[0].flags =
7060                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7061
7062         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
7063            (set ? "setting" : "clearing"),
7064            config->config_table[0].msb_mac_addr,
7065            config->config_table[0].middle_mac_addr,
7066            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7067
7068         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7069                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7070                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7071 }
7072
7073 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7074                              int *state_p, int poll)
7075 {
7076         /* can take a while if any port is running */
7077         int cnt = 5000;
7078
7079         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7080            poll ? "polling" : "waiting", state, idx);
7081
7082         might_sleep();
7083         while (cnt--) {
7084                 if (poll) {
7085                         bnx2x_rx_int(bp->fp, 10);
7086                         /* if index is different from 0
7087                          * the reply for some commands will
7088                          * be on the non default queue
7089                          */
7090                         if (idx)
7091                                 bnx2x_rx_int(&bp->fp[idx], 10);
7092                 }
7093
7094                 mb(); /* state is changed by bnx2x_sp_event() */
7095                 if (*state_p == state) {
7096 #ifdef BNX2X_STOP_ON_ERROR
7097                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7098 #endif
7099                         return 0;
7100                 }
7101
7102                 msleep(1);
7103         }
7104
7105         /* timeout! */
7106         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7107                   poll ? "polling" : "waiting", state, idx);
7108 #ifdef BNX2X_STOP_ON_ERROR
7109         bnx2x_panic();
7110 #endif
7111
7112         return -EBUSY;
7113 }
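/*
 * With cnt = 5000 and msleep(1) per iteration, the loop above gives a
 * ramrod roughly five seconds to complete (more in practice, since
 * msleep(1) may sleep longer than 1 ms).
 */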
7114
7115 static int bnx2x_setup_leading(struct bnx2x *bp)
7116 {
7117         int rc;
7118
7119         /* reset IGU state */
7120         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7121
7122         /* SETUP ramrod */
7123         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7124
7125         /* Wait for completion */
7126         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7127
7128         return rc;
7129 }
7130
7131 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7132 {
7133         struct bnx2x_fastpath *fp = &bp->fp[index];
7134
7135         /* reset IGU state */
7136         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7137
7138         /* SETUP ramrod */
7139         fp->state = BNX2X_FP_STATE_OPENING;
7140         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7141                       fp->cl_id, 0);
7142
7143         /* Wait for completion */
7144         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7145                                  &(fp->state), 0);
7146 }
7147
7148 static int bnx2x_poll(struct napi_struct *napi, int budget);
7149
7150 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7151                                     int *num_tx_queues_out)
7152 {
7153         int _num_rx_queues = 0, _num_tx_queues = 0;
7154
7155         switch (bp->multi_mode) {
7156         case ETH_RSS_MODE_DISABLED:
7157                 _num_rx_queues = 1;
7158                 _num_tx_queues = 1;
7159                 break;
7160
7161         case ETH_RSS_MODE_REGULAR:
7162                 if (num_rx_queues)
7163                         _num_rx_queues = min_t(u32, num_rx_queues,
7164                                                BNX2X_MAX_QUEUES(bp));
7165                 else
7166                         _num_rx_queues = min_t(u32, num_online_cpus(),
7167                                                BNX2X_MAX_QUEUES(bp));
7168
7169                 if (num_tx_queues)
7170                         _num_tx_queues = min_t(u32, num_tx_queues,
7171                                                BNX2X_MAX_QUEUES(bp));
7172                 else
7173                         _num_tx_queues = min_t(u32, num_online_cpus(),
7174                                                BNX2X_MAX_QUEUES(bp));
7175
7176                 /* There must not be more Tx queues than Rx queues */
7177                 if (_num_tx_queues > _num_rx_queues) {
7178                         BNX2X_ERR("number of tx queues (%d) > "
7179                                   "number of rx queues (%d)"
7180                                   "  defaulting to %d\n",
7181                                   _num_tx_queues, _num_rx_queues,
7182                                   _num_rx_queues);
7183                         _num_tx_queues = _num_rx_queues;
7184                 }
7185                 break;
7186
7188         default:
7189                 _num_rx_queues = 1;
7190                 _num_tx_queues = 1;
7191                 break;
7192         }
7193
7194         *num_rx_queues_out = _num_rx_queues;
7195         *num_tx_queues_out = _num_tx_queues;
7196 }
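/*
 * Worked example (illustrative values): with multi_mode=1
 * (ETH_RSS_MODE_REGULAR), both module parameters left at 0, 8 online
 * CPUs and a chip allowing 16 queues, this yields 8 Rx and 8 Tx queues;
 * passing num_tx_queues=12 with num_rx_queues=8 would be clamped back
 * to 8 Tx queues by the rule above.
 */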
7197
7198 static int bnx2x_set_int_mode(struct bnx2x *bp)
7199 {
7200         int rc = 0;
7201
7202         switch (int_mode) {
7203         case INT_MODE_INTx:
7204         case INT_MODE_MSI:
7205                 bp->num_rx_queues = 1;
7206                 bp->num_tx_queues = 1;
7207                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7208                 break;
7209
7210         case INT_MODE_MSIX:
7211         default:
7212                 /* Set interrupt mode according to bp->multi_mode value */
7213                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7214                                         &bp->num_tx_queues);
7215
7216                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7217                    bp->num_rx_queues, bp->num_tx_queues);
7218
7219                 /* if we can't use MSI-X we only need one fp,
7220                  * so try to enable MSI-X with the requested number of fp's
7221                  * and fall back to MSI or legacy INTx with one fp
7222                  */
7223                 rc = bnx2x_enable_msix(bp);
7224                 if (rc) {
7225                         /* failed to enable MSI-X */
7226                         if (bp->multi_mode)
7227                                 BNX2X_ERR("Multi requested but failed to "
7228                                           "enable MSI-X (rx %d tx %d), "
7229                                           "set number of queues to 1\n",
7230                                           bp->num_rx_queues, bp->num_tx_queues);
7231                         bp->num_rx_queues = 1;
7232                         bp->num_tx_queues = 1;
7233                 }
7234                 break;
7235         }
7236         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7237         return rc;
7238 }
7239
7240
7241 /* must be called with rtnl_lock */
7242 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7243 {
7244         u32 load_code;
7245         int i, rc;
7246
7247 #ifdef BNX2X_STOP_ON_ERROR
7248         if (unlikely(bp->panic))
7249                 return -EPERM;
7250 #endif
7251
7252         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7253
7254         rc = bnx2x_set_int_mode(bp);
7255
7256         if (bnx2x_alloc_mem(bp))
7257                 return -ENOMEM;
7258
7259         for_each_rx_queue(bp, i)
7260                 bnx2x_fp(bp, i, disable_tpa) =
7261                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7262
7263         for_each_rx_queue(bp, i)
7264                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7265                                bnx2x_poll, 128);
7266
7267         bnx2x_napi_enable(bp);
7268
7269         if (bp->flags & USING_MSIX_FLAG) {
7270                 rc = bnx2x_req_msix_irqs(bp);
7271                 if (rc) {
7272                         pci_disable_msix(bp->pdev);
7273                         goto load_error1;
7274                 }
7275         } else {
7276                 /* Fall back to INTx if MSI-X could not be enabled due to
7277                    lack of memory (in bnx2x_set_int_mode()) */
7278                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7279                         bnx2x_enable_msi(bp);
7280                 bnx2x_ack_int(bp);
7281                 rc = bnx2x_req_irq(bp);
7282                 if (rc) {
7283                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7284                         if (bp->flags & USING_MSI_FLAG)
7285                                 pci_disable_msi(bp->pdev);
7286                         goto load_error1;
7287                 }
7288                 if (bp->flags & USING_MSI_FLAG) {
7289                         bp->dev->irq = bp->pdev->irq;
7290                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7291                                bp->dev->name, bp->pdev->irq);
7292                 }
7293         }
7294
7295         /* Send LOAD_REQUEST command to MCP.
7296            The reply tells us which type of LOAD to perform:
7297            if this is the first function to be initialized,
7298            common blocks should be initialized as well, otherwise not.
7299         */
7300         if (!BP_NOMCP(bp)) {
7301                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7302                 if (!load_code) {
7303                         BNX2X_ERR("MCP response failure, aborting\n");
7304                         rc = -EBUSY;
7305                         goto load_error2;
7306                 }
7307                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7308                         rc = -EBUSY; /* other port in diagnostic mode */
7309                         goto load_error2;
7310                 }
7311
7312         } else {
7313                 int port = BP_PORT(bp);
7314
7315                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7316                    load_count[0], load_count[1], load_count[2]);
7317                 load_count[0]++;
7318                 load_count[1 + port]++;
7319                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7320                    load_count[0], load_count[1], load_count[2]);
7321                 if (load_count[0] == 1)
7322                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7323                 else if (load_count[1 + port] == 1)
7324                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7325                 else
7326                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7327         }
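        /* load_count[0] tracks how many functions are loaded on the
         * whole device and load_count[1 + port] how many on this port,
         * so the first function overall performs the COMMON init, the
         * first one on a port the PORT init, and everyone else only the
         * FUNCTION init.
         */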
7328
7329         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7330             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7331                 bp->port.pmf = 1;
7332         else
7333                 bp->port.pmf = 0;
7334         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7335
7336         /* Initialize HW */
7337         rc = bnx2x_init_hw(bp, load_code);
7338         if (rc) {
7339                 BNX2X_ERR("HW init failed, aborting\n");
7340                 goto load_error2;
7341         }
7342
7343         /* Setup NIC internals and enable interrupts */
7344         bnx2x_nic_init(bp, load_code);
7345
7346         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7347             (bp->common.shmem2_base))
7348                 SHMEM2_WR(bp, dcc_support,
7349                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7350                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7351
7352         /* Send LOAD_DONE command to MCP */
7353         if (!BP_NOMCP(bp)) {
7354                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7355                 if (!load_code) {
7356                         BNX2X_ERR("MCP response failure, aborting\n");
7357                         rc = -EBUSY;
7358                         goto load_error3;
7359                 }
7360         }
7361
7362         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7363
7364         rc = bnx2x_setup_leading(bp);
7365         if (rc) {
7366                 BNX2X_ERR("Setup leading failed!\n");
7367                 goto load_error3;
7368         }
7369
7370         if (CHIP_IS_E1H(bp))
7371                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7372                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7373                         bp->state = BNX2X_STATE_DISABLED;
7374                 }
7375
7376         if (bp->state == BNX2X_STATE_OPEN) {
7377                 for_each_nondefault_queue(bp, i) {
7378                         rc = bnx2x_setup_multi(bp, i);
7379                         if (rc)
7380                                 goto load_error3;
7381                 }
7382
7383                 if (CHIP_IS_E1(bp))
7384                         bnx2x_set_mac_addr_e1(bp, 1);
7385                 else
7386                         bnx2x_set_mac_addr_e1h(bp, 1);
7387         }
7388
7389         if (bp->port.pmf)
7390                 bnx2x_initial_phy_init(bp, load_mode);
7391
7392         /* Start fast path */
7393         switch (load_mode) {
7394         case LOAD_NORMAL:
7395                 if (bp->state == BNX2X_STATE_OPEN) {
7396                         /* Tx queues should only be re-enabled */
7397                         netif_tx_wake_all_queues(bp->dev);
7398                 }
7399                 /* Initialize the receive filter. */
7400                 bnx2x_set_rx_mode(bp->dev);
7401                 break;
7402
7403         case LOAD_OPEN:
7404                 netif_tx_start_all_queues(bp->dev);
7405                 if (bp->state != BNX2X_STATE_OPEN)
7406                         netif_tx_disable(bp->dev);
7407                 /* Initialize the receive filter. */
7408                 bnx2x_set_rx_mode(bp->dev);
7409                 break;
7410
7411         case LOAD_DIAG:
7412                 /* Initialize the receive filter. */
7413                 bnx2x_set_rx_mode(bp->dev);
7414                 bp->state = BNX2X_STATE_DIAG;
7415                 break;
7416
7417         default:
7418                 break;
7419         }
7420
7421         if (!bp->port.pmf)
7422                 bnx2x__link_status_update(bp);
7423
7424         /* start the timer */
7425         mod_timer(&bp->timer, jiffies + bp->current_interval);
7426
7427
7428         return 0;
7429
7430 load_error3:
7431         bnx2x_int_disable_sync(bp, 1);
7432         if (!BP_NOMCP(bp)) {
7433                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7434                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7435         }
7436         bp->port.pmf = 0;
7437         /* Free SKBs, SGEs, TPA pool and driver internals */
7438         bnx2x_free_skbs(bp);
7439         for_each_rx_queue(bp, i)
7440                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7441 load_error2:
7442         /* Release IRQs */
7443         bnx2x_free_irq(bp);
7444 load_error1:
7445         bnx2x_napi_disable(bp);
7446         for_each_rx_queue(bp, i)
7447                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7448         bnx2x_free_mem(bp);
7449
7450         return rc;
7451 }
7452
7453 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7454 {
7455         struct bnx2x_fastpath *fp = &bp->fp[index];
7456         int rc;
7457
7458         /* halt the connection */
7459         fp->state = BNX2X_FP_STATE_HALTING;
7460         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7461
7462         /* Wait for completion */
7463         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7464                                &(fp->state), 1);
7465         if (rc) /* timeout */
7466                 return rc;
7467
7468         /* delete cfc entry */
7469         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7470
7471         /* Wait for completion */
7472         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7473                                &(fp->state), 1);
7474         return rc;
7475 }
7476
7477 static int bnx2x_stop_leading(struct bnx2x *bp)
7478 {
7479         __le16 dsb_sp_prod_idx;
7480         /* if the other port is handling traffic,
7481            this can take a lot of time */
7482         int cnt = 500;
7483         int rc;
7484
7485         might_sleep();
7486
7487         /* Send HALT ramrod */
7488         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7489         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7490
7491         /* Wait for completion */
7492         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7493                                &(bp->fp[0].state), 1);
7494         if (rc) /* timeout */
7495                 return rc;
7496
7497         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7498
7499         /* Send PORT_DELETE ramrod */
7500         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7501
7502         /* Wait for the completion to arrive on the default status block.
7503            We are going to reset the chip anyway, so there is not much
7504            to do if this times out.
7505          */
7506         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7507                 if (!cnt) {
7508                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7509                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7510                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7511 #ifdef BNX2X_STOP_ON_ERROR
7512                         bnx2x_panic();
7513 #endif
7514                         rc = -EBUSY;
7515                         break;
7516                 }
7517                 cnt--;
7518                 msleep(1);
7519                 rmb(); /* Refresh the dsb_sp_prod */
7520         }
7521         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7522         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7523
7524         return rc;
7525 }
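/*
 * The PORT_DEL wait above polls the default status block producer for at
 * most ~500 ms (500 iterations of msleep(1)); on timeout (unless
 * BNX2X_STOP_ON_ERROR is set) we just note it and proceed, since the
 * chip is about to be reset anyway.
 */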
7526
7527 static void bnx2x_reset_func(struct bnx2x *bp)
7528 {
7529         int port = BP_PORT(bp);
7530         int func = BP_FUNC(bp);
7531         int base, i;
7532
7533         /* Configure IGU */
7534         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7535         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7536
7537         /* Clear ILT */
7538         base = FUNC_ILT_BASE(func);
7539         for (i = base; i < base + ILT_PER_FUNC; i++)
7540                 bnx2x_ilt_wr(bp, i, 0);
7541 }
7542
7543 static void bnx2x_reset_port(struct bnx2x *bp)
7544 {
7545         int port = BP_PORT(bp);
7546         u32 val;
7547
7548         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7549
7550         /* Do not rcv packets to BRB */
7551         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7552         /* Do not direct rcv packets that are not for MCP to the BRB */
7553         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7554                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7555
7556         /* Configure AEU */
7557         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7558
7559         msleep(100);
7560         /* Check for BRB port occupancy */
7561         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7562         if (val)
7563                 DP(NETIF_MSG_IFDOWN,
7564                    "BRB1 is not empty  %d blocks are occupied\n", val);
7565
7566         /* TODO: Close Doorbell port? */
7567 }
7568
7569 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7570 {
7571         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7572            BP_FUNC(bp), reset_code);
7573
7574         switch (reset_code) {
7575         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7576                 bnx2x_reset_port(bp);
7577                 bnx2x_reset_func(bp);
7578                 bnx2x_reset_common(bp);
7579                 break;
7580
7581         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7582                 bnx2x_reset_port(bp);
7583                 bnx2x_reset_func(bp);
7584                 break;
7585
7586         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7587                 bnx2x_reset_func(bp);
7588                 break;
7589
7590         default:
7591                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7592                 break;
7593         }
7594 }
7595
7596 /* must be called with rtnl_lock */
7597 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7598 {
7599         int port = BP_PORT(bp);
7600         u32 reset_code = 0;
7601         int i, cnt, rc;
7602
7603         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7604
7605         bp->rx_mode = BNX2X_RX_MODE_NONE;
7606         bnx2x_set_storm_rx_mode(bp);
7607
7608         bnx2x_netif_stop(bp, 1);
7609
7610         del_timer_sync(&bp->timer);
7611         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7612                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7613         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7614
7615         /* Release IRQs */
7616         bnx2x_free_irq(bp);
7617
7618         /* Wait until tx fastpath tasks complete */
7619         for_each_tx_queue(bp, i) {
7620                 struct bnx2x_fastpath *fp = &bp->fp[i];
7621
7622                 cnt = 1000;
7623                 while (bnx2x_has_tx_work_unload(fp)) {
7624
7625                         bnx2x_tx_int(fp);
7626                         if (!cnt) {
7627                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7628                                           i);
7629 #ifdef BNX2X_STOP_ON_ERROR
7630                                 bnx2x_panic();
7631                                 return -EBUSY;
7632 #else
7633                                 break;
7634 #endif
7635                         }
7636                         cnt--;
7637                         msleep(1);
7638                 }
7639         }
7640         /* Give HW time to discard old tx messages */
7641         msleep(1);
7642
7643         if (CHIP_IS_E1(bp)) {
7644                 struct mac_configuration_cmd *config =
7645                                                 bnx2x_sp(bp, mcast_config);
7646
7647                 bnx2x_set_mac_addr_e1(bp, 0);
7648
7649                 for (i = 0; i < config->hdr.length; i++)
7650                         CAM_INVALIDATE(config->config_table[i]);
7651
7652                 config->hdr.length = i;
7653                 if (CHIP_REV_IS_SLOW(bp))
7654                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7655                 else
7656                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7657                 config->hdr.client_id = bp->fp->cl_id;
7658                 config->hdr.reserved1 = 0;
7659
7660                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7661                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7662                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7663
7664         } else { /* E1H */
7665                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7666
7667                 bnx2x_set_mac_addr_e1h(bp, 0);
7668
7669                 for (i = 0; i < MC_HASH_SIZE; i++)
7670                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7671
7672                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7673         }
7674
7675         if (unload_mode == UNLOAD_NORMAL)
7676                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7677
7678         else if (bp->flags & NO_WOL_FLAG)
7679                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7680
7681         else if (bp->wol) {
7682                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7683                 u8 *mac_addr = bp->dev->dev_addr;
7684                 u32 val;
7685                 /* The mac address is written to entries 1-4 to
7686                    preserve entry 0 which is used by the PMF */
7687                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7688
7689                 val = (mac_addr[0] << 8) | mac_addr[1];
7690                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7691
7692                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7693                       (mac_addr[4] << 8) | mac_addr[5];
7694                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7695
7696                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7697
7698         } else
7699                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7700
7701         /* Close multi and leading connections.
7702            Completions for ramrods are collected synchronously */
7703         for_each_nondefault_queue(bp, i)
7704                 if (bnx2x_stop_multi(bp, i))
7705                         goto unload_error;
7706
7707         rc = bnx2x_stop_leading(bp);
7708         if (rc) {
7709                 BNX2X_ERR("Stop leading failed!\n");
7710 #ifdef BNX2X_STOP_ON_ERROR
7711                 return -EBUSY;
7712 #else
7713                 goto unload_error;
7714 #endif
7715         }
7716
7717 unload_error:
7718         if (!BP_NOMCP(bp))
7719                 reset_code = bnx2x_fw_command(bp, reset_code);
7720         else {
7721                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7722                    load_count[0], load_count[1], load_count[2]);
7723                 load_count[0]--;
7724                 load_count[1 + port]--;
7725                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7726                    load_count[0], load_count[1], load_count[2]);
7727                 if (load_count[0] == 0)
7728                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7729                 else if (load_count[1 + port] == 0)
7730                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7731                 else
7732                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7733         }
7734
7735         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7736             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7737                 bnx2x__link_reset(bp);
7738
7739         /* Reset the chip */
7740         bnx2x_reset_chip(bp, reset_code);
7741
7742         /* Report UNLOAD_DONE to MCP */
7743         if (!BP_NOMCP(bp))
7744                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7745
7746         bp->port.pmf = 0;
7747
7748         /* Free SKBs, SGEs, TPA pool and driver internals */
7749         bnx2x_free_skbs(bp);
7750         for_each_rx_queue(bp, i)
7751                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7752         for_each_rx_queue(bp, i)
7753                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7754         bnx2x_free_mem(bp);
7755
7756         bp->state = BNX2X_STATE_CLOSED;
7757
7758         netif_carrier_off(bp->dev);
7759
7760         return 0;
7761 }
7762
7763 static void bnx2x_reset_task(struct work_struct *work)
7764 {
7765         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7766
7767 #ifdef BNX2X_STOP_ON_ERROR
7768         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7769                   " so reset not done to allow debug dump,\n"
7770                   " you will need to reboot when done\n");
7771         return;
7772 #endif
7773
7774         rtnl_lock();
7775
7776         if (!netif_running(bp->dev))
7777                 goto reset_task_exit;
7778
7779         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7780         bnx2x_nic_load(bp, LOAD_NORMAL);
7781
7782 reset_task_exit:
7783         rtnl_unlock();
7784 }
7785
7786 /* end of nic load/unload */
7787
7788 /* ethtool_ops */
7789
7790 /*
7791  * Init service functions
7792  */
7793
7794 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7795 {
7796         switch (func) {
7797         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7798         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7799         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7800         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7801         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7802         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7803         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7804         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7805         default:
7806                 BNX2X_ERR("Unsupported function index: %d\n", func);
7807                 return (u32)(-1);
7808         }
7809 }
7810
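/*
 * The "pretend" register makes subsequent GRC accesses from this PCI
 * function execute on behalf of another function; writing 0 below lets
 * us disable interrupts as if we were function 0, and the read-back is
 * used to flush and verify the write.
 */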
7811 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7812 {
7813         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7814
7815         /* Flush all outstanding writes */
7816         mmiowb();
7817
7818         /* Pretend to be function 0 */
7819         REG_WR(bp, reg, 0);
7820         /* Flush the GRC transaction (in the chip) */
7821         new_val = REG_RD(bp, reg);
7822         if (new_val != 0) {
7823                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7824                           new_val);
7825                 BUG();
7826         }
7827
7828         /* From now on we are in the "like-E1" mode */
7829         bnx2x_int_disable(bp);
7830
7831         /* Flush all outstanding writes */
7832         mmiowb();
7833
7834         /* Restore the original function settings */
7835         REG_WR(bp, reg, orig_func);
7836         new_val = REG_RD(bp, reg);
7837         if (new_val != orig_func) {
7838                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7839                           orig_func, new_val);
7840                 BUG();
7841         }
7842 }
7843
7844 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7845 {
7846         if (CHIP_IS_E1H(bp))
7847                 bnx2x_undi_int_disable_e1h(bp, func);
7848         else
7849                 bnx2x_int_disable(bp);
7850 }
7851
7852 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7853 {
7854         u32 val;
7855
7856         /* Check if there is any driver already loaded */
7857         val = REG_RD(bp, MISC_REG_UNPREPARED);
7858         if (val == 0x1) {
7859                 /* Check if it is the UNDI driver
7860                  * UNDI driver initializes CID offset for normal bell to 0x7
7861                  */
7862                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7863                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7864                 if (val == 0x7) {
7865                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7866                         /* save our func */
7867                         int func = BP_FUNC(bp);
7868                         u32 swap_en;
7869                         u32 swap_val;
7870
7871                         /* clear the UNDI indication */
7872                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7873
7874                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7875
7876                         /* try unload UNDI on port 0 */
7877                         bp->func = 0;
7878                         bp->fw_seq =
7879                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7880                                 DRV_MSG_SEQ_NUMBER_MASK);
7881                         reset_code = bnx2x_fw_command(bp, reset_code);
7882
7883                         /* if UNDI is loaded on the other port */
7884                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7885
7886                                 /* send "DONE" for previous unload */
7887                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7888
7889                                 /* unload UNDI on port 1 */
7890                                 bp->func = 1;
7891                                 bp->fw_seq =
7892                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7893                                         DRV_MSG_SEQ_NUMBER_MASK);
7894                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7895
7896                                 bnx2x_fw_command(bp, reset_code);
7897                         }
7898
7899                         /* now it's safe to release the lock */
7900                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7901
7902                         bnx2x_undi_int_disable(bp, func);
7903
7904                         /* close input traffic and wait for it */
7905                         /* Do not rcv packets to BRB */
7906                         REG_WR(bp,
7907                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7908                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7909                         /* Do not direct rcv packets that are not for MCP to
7910                          * the BRB */
7911                         REG_WR(bp,
7912                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7913                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7914                         /* clear AEU */
7915                         REG_WR(bp,
7916                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7917                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7918                         msleep(10);
7919
7920                         /* save NIG port swap info */
7921                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7922                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7923                         /* reset device */
7924                         REG_WR(bp,
7925                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7926                                0xd3ffffff);
7927                         REG_WR(bp,
7928                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7929                                0x1403);
7930                         /* take the NIG out of reset and restore swap values */
7931                         REG_WR(bp,
7932                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7933                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7934                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7935                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7936
7937                         /* send unload done to the MCP */
7938                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7939
7940                         /* restore our func and fw_seq */
7941                         bp->func = func;
7942                         bp->fw_seq =
7943                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7944                                 DRV_MSG_SEQ_NUMBER_MASK);
7945
7946                 } else
7947                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7948         }
7949 }
7950
7951 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7952 {
7953         u32 val, val2, val3, val4, id;
7954         u16 pmc;
7955
7956         /* Get the chip revision id and number. */
7957         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7958         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7959         id = ((val & 0xffff) << 16);
7960         val = REG_RD(bp, MISC_REG_CHIP_REV);
7961         id |= ((val & 0xf) << 12);
7962         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7963         id |= ((val & 0xff) << 4);
7964         val = REG_RD(bp, MISC_REG_BOND_ID);
7965         id |= (val & 0xf);
7966         bp->common.chip_id = id;
7967         bp->link_params.chip_id = bp->common.chip_id;
7968         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7969
7970         val = (REG_RD(bp, 0x2874) & 0x55);
7971         if ((bp->common.chip_id & 0x1) ||
7972             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7973                 bp->flags |= ONE_PORT_FLAG;
7974                 BNX2X_DEV_INFO("single port device\n");
7975         }
7976
7977         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7978         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7979                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7980         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7981                        bp->common.flash_size, bp->common.flash_size);
7982
7983         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7984         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
7985         bp->link_params.shmem_base = bp->common.shmem_base;
7986         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7987                        bp->common.shmem_base, bp->common.shmem2_base);
7988
7989         if (!bp->common.shmem_base ||
7990             (bp->common.shmem_base < 0xA0000) ||
7991             (bp->common.shmem_base >= 0xC0000)) {
7992                 BNX2X_DEV_INFO("MCP not active\n");
7993                 bp->flags |= NO_MCP_FLAG;
7994                 return;
7995         }
7996
7997         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7998         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7999                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8000                 BNX2X_ERR("BAD MCP validity signature\n");
8001
8002         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8003         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8004
8005         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8006                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8007                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8008
8009         bp->link_params.feature_config_flags = 0;
8010         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8011         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8012                 bp->link_params.feature_config_flags |=
8013                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8014         else
8015                 bp->link_params.feature_config_flags &=
8016                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8017
8018         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8019         bp->common.bc_ver = val;
8020         BNX2X_DEV_INFO("bc_ver %X\n", val);
8021         if (val < BNX2X_BC_VER) {
8022                 /* for now we only warn;
8023                  * later we might need to enforce this */
8024                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8025                           " please upgrade BC\n", BNX2X_BC_VER, val);
8026         }
8027         bp->link_params.feature_config_flags |=
8028                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8029                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8030
8031         if (BP_E1HVN(bp) == 0) {
8032                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8033                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8034         } else {
8035                 /* no WOL capability for E1HVN != 0 */
8036                 bp->flags |= NO_WOL_FLAG;
8037         }
8038         BNX2X_DEV_INFO("%sWoL capable\n",
8039                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8040
8041         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8042         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8043         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8044         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8045
8046         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8047                val, val2, val3, val4);
8048 }
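/*
 * Example of the chip_id composition above (hypothetical values): a
 * chip num of 0x164e, rev 0x1, metal 0x00 and bond_id 0x0 would yield
 * chip_id 0x164e1000.
 */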
8049
8050 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8051                                                     u32 switch_cfg)
8052 {
8053         int port = BP_PORT(bp);
8054         u32 ext_phy_type;
8055
8056         switch (switch_cfg) {
8057         case SWITCH_CFG_1G:
8058                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8059
8060                 ext_phy_type =
8061                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8062                 switch (ext_phy_type) {
8063                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8064                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8065                                        ext_phy_type);
8066
8067                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8068                                                SUPPORTED_10baseT_Full |
8069                                                SUPPORTED_100baseT_Half |
8070                                                SUPPORTED_100baseT_Full |
8071                                                SUPPORTED_1000baseT_Full |
8072                                                SUPPORTED_2500baseX_Full |
8073                                                SUPPORTED_TP |
8074                                                SUPPORTED_FIBRE |
8075                                                SUPPORTED_Autoneg |
8076                                                SUPPORTED_Pause |
8077                                                SUPPORTED_Asym_Pause);
8078                         break;
8079
8080                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8081                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8082                                        ext_phy_type);
8083
8084                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8085                                                SUPPORTED_10baseT_Full |
8086                                                SUPPORTED_100baseT_Half |
8087                                                SUPPORTED_100baseT_Full |
8088                                                SUPPORTED_1000baseT_Full |
8089                                                SUPPORTED_TP |
8090                                                SUPPORTED_FIBRE |
8091                                                SUPPORTED_Autoneg |
8092                                                SUPPORTED_Pause |
8093                                                SUPPORTED_Asym_Pause);
8094                         break;
8095
8096                 default:
8097                         BNX2X_ERR("NVRAM config error. "
8098                                   "BAD SerDes ext_phy_config 0x%x\n",
8099                                   bp->link_params.ext_phy_config);
8100                         return;
8101                 }
8102
8103                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8104                                            port*0x10);
8105                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8106                 break;
8107
8108         case SWITCH_CFG_10G:
8109                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8110
8111                 ext_phy_type =
8112                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8113                 switch (ext_phy_type) {
8114                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8115                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8116                                        ext_phy_type);
8117
8118                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8119                                                SUPPORTED_10baseT_Full |
8120                                                SUPPORTED_100baseT_Half |
8121                                                SUPPORTED_100baseT_Full |
8122                                                SUPPORTED_1000baseT_Full |
8123                                                SUPPORTED_2500baseX_Full |
8124                                                SUPPORTED_10000baseT_Full |
8125                                                SUPPORTED_TP |
8126                                                SUPPORTED_FIBRE |
8127                                                SUPPORTED_Autoneg |
8128                                                SUPPORTED_Pause |
8129                                                SUPPORTED_Asym_Pause);
8130                         break;
8131
8132                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8133                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8134                                        ext_phy_type);
8135
8136                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8137                                                SUPPORTED_1000baseT_Full |
8138                                                SUPPORTED_FIBRE |
8139                                                SUPPORTED_Autoneg |
8140                                                SUPPORTED_Pause |
8141                                                SUPPORTED_Asym_Pause);
8142                         break;
8143
8144                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8145                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8146                                        ext_phy_type);
8147
8148                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8149                                                SUPPORTED_2500baseX_Full |
8150                                                SUPPORTED_1000baseT_Full |
8151                                                SUPPORTED_FIBRE |
8152                                                SUPPORTED_Autoneg |
8153                                                SUPPORTED_Pause |
8154                                                SUPPORTED_Asym_Pause);
8155                         break;
8156
8157                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8158                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8159                                        ext_phy_type);
8160
8161                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8162                                                SUPPORTED_FIBRE |
8163                                                SUPPORTED_Pause |
8164                                                SUPPORTED_Asym_Pause);
8165                         break;
8166
8167                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8168                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8169                                        ext_phy_type);
8170
8171                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8172                                                SUPPORTED_1000baseT_Full |
8173                                                SUPPORTED_FIBRE |
8174                                                SUPPORTED_Pause |
8175                                                SUPPORTED_Asym_Pause);
8176                         break;
8177
8178                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8179                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8180                                        ext_phy_type);
8181
8182                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8183                                                SUPPORTED_1000baseT_Full |
8184                                                SUPPORTED_Autoneg |
8185                                                SUPPORTED_FIBRE |
8186                                                SUPPORTED_Pause |
8187                                                SUPPORTED_Asym_Pause);
8188                         break;
8189
8190                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8191                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8192                                        ext_phy_type);
8193
8194                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8195                                                SUPPORTED_1000baseT_Full |
8196                                                SUPPORTED_Autoneg |
8197                                                SUPPORTED_FIBRE |
8198                                                SUPPORTED_Pause |
8199                                                SUPPORTED_Asym_Pause);
8200                         break;
8201
8202                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8203                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8204                                        ext_phy_type);
8205
8206                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8207                                                SUPPORTED_TP |
8208                                                SUPPORTED_Autoneg |
8209                                                SUPPORTED_Pause |
8210                                                SUPPORTED_Asym_Pause);
8211                         break;
8212
8213                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8214                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8215                                        ext_phy_type);
8216
8217                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8218                                                SUPPORTED_10baseT_Full |
8219                                                SUPPORTED_100baseT_Half |
8220                                                SUPPORTED_100baseT_Full |
8221                                                SUPPORTED_1000baseT_Full |
8222                                                SUPPORTED_10000baseT_Full |
8223                                                SUPPORTED_TP |
8224                                                SUPPORTED_Autoneg |
8225                                                SUPPORTED_Pause |
8226                                                SUPPORTED_Asym_Pause);
8227                         break;
8228
8229                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8230                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8231                                   bp->link_params.ext_phy_config);
8232                         break;
8233
8234                 default:
8235                         BNX2X_ERR("NVRAM config error. "
8236                                   "BAD XGXS ext_phy_config 0x%x\n",
8237                                   bp->link_params.ext_phy_config);
8238                         return;
8239                 }
8240
8241                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8242                                            port*0x18);
8243                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8244
8245                 break;
8246
8247         default:
8248                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8249                           bp->port.link_config);
8250                 return;
8251         }
8252         bp->link_params.phy_addr = bp->port.phy_addr;
8253
8254         /* mask what we support according to speed_cap_mask */
8255         if (!(bp->link_params.speed_cap_mask &
8256                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8257                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8258
8259         if (!(bp->link_params.speed_cap_mask &
8260                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8261                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8262
8263         if (!(bp->link_params.speed_cap_mask &
8264                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8265                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8266
8267         if (!(bp->link_params.speed_cap_mask &
8268                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8269                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8270
8271         if (!(bp->link_params.speed_cap_mask &
8272                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8273                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8274                                         SUPPORTED_1000baseT_Full);
8275
8276         if (!(bp->link_params.speed_cap_mask &
8277                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8278                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8279
8280         if (!(bp->link_params.speed_cap_mask &
8281                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8282                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8283
8284         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8285 }
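
     /*
      * Illustrative example of the masking above (mask value hypothetical):
      * if the NVRAM speed_cap_mask has only the D0_1G bit set, every
      * 10/100M, 2.5G and 10G SUPPORTED_* bit is cleared here and
      * bp->port.supported is left with just the 1G modes.  The media,
      * autoneg and pause bits are never masked.
      */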
8286
8287 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8288 {
8289         bp->link_params.req_duplex = DUPLEX_FULL;
8290
8291         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8292         case PORT_FEATURE_LINK_SPEED_AUTO:
8293                 if (bp->port.supported & SUPPORTED_Autoneg) {
8294                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8295                         bp->port.advertising = bp->port.supported;
8296                 } else {
8297                         u32 ext_phy_type =
8298                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8299
8300                         if ((ext_phy_type ==
8301                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8302                             (ext_phy_type ==
8303                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8304                                 /* force 10G, no AN */
8305                                 bp->link_params.req_line_speed = SPEED_10000;
8306                                 bp->port.advertising =
8307                                                 (ADVERTISED_10000baseT_Full |
8308                                                  ADVERTISED_FIBRE);
8309                                 break;
8310                         }
8311                         BNX2X_ERR("NVRAM config error. "
8312                                   "Invalid link_config 0x%x"
8313                                   "  Autoneg not supported\n",
8314                                   bp->port.link_config);
8315                         return;
8316                 }
8317                 break;
8318
8319         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8320                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8321                         bp->link_params.req_line_speed = SPEED_10;
8322                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8323                                                 ADVERTISED_TP);
8324                 } else {
8325                         BNX2X_ERR("NVRAM config error. "
8326                                   "Invalid link_config 0x%x"
8327                                   "  speed_cap_mask 0x%x\n",
8328                                   bp->port.link_config,
8329                                   bp->link_params.speed_cap_mask);
8330                         return;
8331                 }
8332                 break;
8333
8334         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8335                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8336                         bp->link_params.req_line_speed = SPEED_10;
8337                         bp->link_params.req_duplex = DUPLEX_HALF;
8338                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8339                                                 ADVERTISED_TP);
8340                 } else {
8341                         BNX2X_ERR("NVRAM config error. "
8342                                   "Invalid link_config 0x%x"
8343                                   "  speed_cap_mask 0x%x\n",
8344                                   bp->port.link_config,
8345                                   bp->link_params.speed_cap_mask);
8346                         return;
8347                 }
8348                 break;
8349
8350         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8351                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8352                         bp->link_params.req_line_speed = SPEED_100;
8353                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8354                                                 ADVERTISED_TP);
8355                 } else {
8356                         BNX2X_ERR("NVRAM config error. "
8357                                   "Invalid link_config 0x%x"
8358                                   "  speed_cap_mask 0x%x\n",
8359                                   bp->port.link_config,
8360                                   bp->link_params.speed_cap_mask);
8361                         return;
8362                 }
8363                 break;
8364
8365         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8366                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8367                         bp->link_params.req_line_speed = SPEED_100;
8368                         bp->link_params.req_duplex = DUPLEX_HALF;
8369                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8370                                                 ADVERTISED_TP);
8371                 } else {
8372                         BNX2X_ERR("NVRAM config error. "
8373                                   "Invalid link_config 0x%x"
8374                                   "  speed_cap_mask 0x%x\n",
8375                                   bp->port.link_config,
8376                                   bp->link_params.speed_cap_mask);
8377                         return;
8378                 }
8379                 break;
8380
8381         case PORT_FEATURE_LINK_SPEED_1G:
8382                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8383                         bp->link_params.req_line_speed = SPEED_1000;
8384                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8385                                                 ADVERTISED_TP);
8386                 } else {
8387                         BNX2X_ERR("NVRAM config error. "
8388                                   "Invalid link_config 0x%x"
8389                                   "  speed_cap_mask 0x%x\n",
8390                                   bp->port.link_config,
8391                                   bp->link_params.speed_cap_mask);
8392                         return;
8393                 }
8394                 break;
8395
8396         case PORT_FEATURE_LINK_SPEED_2_5G:
8397                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8398                         bp->link_params.req_line_speed = SPEED_2500;
8399                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8400                                                 ADVERTISED_TP);
8401                 } else {
8402                         BNX2X_ERR("NVRAM config error. "
8403                                   "Invalid link_config 0x%x"
8404                                   "  speed_cap_mask 0x%x\n",
8405                                   bp->port.link_config,
8406                                   bp->link_params.speed_cap_mask);
8407                         return;
8408                 }
8409                 break;
8410
8411         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8412         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8413         case PORT_FEATURE_LINK_SPEED_10G_KR:
8414                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8415                         bp->link_params.req_line_speed = SPEED_10000;
8416                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8417                                                 ADVERTISED_FIBRE);
8418                 } else {
8419                         BNX2X_ERR("NVRAM config error. "
8420                                   "Invalid link_config 0x%x"
8421                                   "  speed_cap_mask 0x%x\n",
8422                                   bp->port.link_config,
8423                                   bp->link_params.speed_cap_mask);
8424                         return;
8425                 }
8426                 break;
8427
8428         default:
8429                 BNX2X_ERR("NVRAM config error. "
8430                           "BAD link speed link_config 0x%x\n",
8431                           bp->port.link_config);
8432                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8433                 bp->port.advertising = bp->port.supported;
8434                 break;
8435         }
8436
8437         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8438                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8439         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8440             !(bp->port.supported & SUPPORTED_Autoneg))
8441                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8442
8443         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8444                        "  advertising 0x%x\n",
8445                        bp->link_params.req_line_speed,
8446                        bp->link_params.req_duplex,
8447                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8448 }
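
     /*
      * Summary of the mapping above: the NVRAM link_config either selects
      * autoneg (advertising everything in bp->port.supported) or forces a
      * single speed.  req_duplex defaults to full and is overridden only
      * by the explicit half-duplex options.  Flow control follows the
      * NVRAM setting, except that AUTO flow control is meaningless
      * without autoneg support and is therefore downgraded to NONE.
      */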
8449
8450 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8451 {
8452         int port = BP_PORT(bp);
8453         u32 val, val2;
8454         u32 config;
8455         u16 i;
8456         u32 ext_phy_type;
8457
8458         bp->link_params.bp = bp;
8459         bp->link_params.port = port;
8460
8461         bp->link_params.lane_config =
8462                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8463         bp->link_params.ext_phy_config =
8464                 SHMEM_RD(bp,
8465                          dev_info.port_hw_config[port].external_phy_config);
8466         /* BCM8727_NOC => BCM8727, no over-current variant */
8467         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8468             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8469                 bp->link_params.ext_phy_config &=
8470                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8471                 bp->link_params.ext_phy_config |=
8472                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8473                 bp->link_params.feature_config_flags |=
8474                         FEATURE_CONFIG_BCM8727_NOC;
8475         }
8476
8477         bp->link_params.speed_cap_mask =
8478                 SHMEM_RD(bp,
8479                          dev_info.port_hw_config[port].speed_capability_mask);
8480
8481         bp->port.link_config =
8482                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8483
8484         /* Get the XGXS Rx and Tx config for all 4 lanes */
8485         for (i = 0; i < 2; i++) {
8486                 val = SHMEM_RD(bp,
8487                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8488                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8489                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8490
8491                 val = SHMEM_RD(bp,
8492                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8493                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8494                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8495         }
8496
8497         /* If the device is capable of WoL, set the default state according
8498          * to the HW
8499          */
8500         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8501         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8502                    (config & PORT_FEATURE_WOL_ENABLED));
8503
8504         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8505                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8506                        bp->link_params.lane_config,
8507                        bp->link_params.ext_phy_config,
8508                        bp->link_params.speed_cap_mask, bp->port.link_config);
8509
8510         bp->link_params.switch_cfg |= (bp->port.link_config &
8511                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8512         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8513
8514         bnx2x_link_settings_requested(bp);
8515
8516         /*
8517          * If connected directly, work with the internal PHY; otherwise,
8518          * work with the external PHY
8519          */
8520         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8521         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8522                 bp->mdio.prtad = bp->link_params.phy_addr;
8523
8524         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8525                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8526                 bp->mdio.prtad =
8527                         (bp->link_params.ext_phy_config &
8528                          PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8529                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8530
8531         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8532         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8533         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8534         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8535         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8536         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8537         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8538         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8539         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8540         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8541 }
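
     /*
      * Worked example of the MAC assembly above (values illustrative):
      * mac_upper = 0x00000010 and mac_lower = 0x18a2b3c4 yield the
      * station address 00:10:18:a2:b3:c4; the upper word supplies bytes
      * 0-1 and the lower dword bytes 2-5, most significant byte first.
      */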
8542
8543 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8544 {
8545         int func = BP_FUNC(bp);
8546         u32 val, val2;
8547         int rc = 0;
8548
8549         bnx2x_get_common_hwinfo(bp);
8550
8551         bp->e1hov = 0;
8552         bp->e1hmf = 0;
8553         if (CHIP_IS_E1H(bp)) {
8554                 bp->mf_config =
8555                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8556
8557                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8558                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8559                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8560                         bp->e1hmf = 1;
8561                 BNX2X_DEV_INFO("%s function mode\n",
8562                                IS_E1HMF(bp) ? "multi" : "single");
8563
8564                 if (IS_E1HMF(bp)) {
8565                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8566                                                                 e1hov_tag) &
8567                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8568                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8569                                 bp->e1hov = val;
8570                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8571                                                "(0x%04x)\n",
8572                                                func, bp->e1hov, bp->e1hov);
8573                         } else {
8574                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8575                                           "  aborting\n", func);
8576                                 rc = -EPERM;
8577                         }
8578                 } else {
8579                         if (BP_E1HVN(bp)) {
8580                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8581                                           "  aborting\n", BP_E1HVN(bp));
8582                                 rc = -EPERM;
8583                         }
8584                 }
8585         }
8586
8587         if (!BP_NOMCP(bp)) {
8588                 bnx2x_get_port_hwinfo(bp);
8589
8590                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8591                               DRV_MSG_SEQ_NUMBER_MASK);
8592                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8593         }
8594
8595         if (IS_E1HMF(bp)) {
8596                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8597                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8598                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8599                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8600                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8601                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8602                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8603                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8604                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8605                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8606                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8607                                ETH_ALEN);
8608                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8609                                ETH_ALEN);
8610                 }
8611
8612                 return rc;
8613         }
8614
8615         if (BP_NOMCP(bp)) {
8616                 /* only supposed to happen on emulation/FPGA */
8617                 BNX2X_ERR("warning: random MAC workaround active\n");
8618                 random_ether_addr(bp->dev->dev_addr);
8619                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8620         }
8621
8622         return rc;
8623 }
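
     /*
      * Note: in E1H multi-function (MF) mode each PCI function is
      * distinguished on the wire by an outer VLAN tag (E1HOV) taken from
      * the mf_cfg section of shared memory.  A function that is in MF
      * mode but has no valid tag cannot be used, hence the -EPERM above;
      * likewise a non-zero VN number is invalid in single-function mode.
      */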
8624
8625 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8626 {
8627         int func = BP_FUNC(bp);
8628         int timer_interval;
8629         int rc;
8630
8631         /* Disable interrupt handling until HW is initialized */
8632         atomic_set(&bp->intr_sem, 1);
8633         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8634
8635         mutex_init(&bp->port.phy_mutex);
8636
8637         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8638         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8639
8640         rc = bnx2x_get_hwinfo(bp);
8641
8642         /* need to reset the chip if UNDI was active */
8643         if (!BP_NOMCP(bp))
8644                 bnx2x_undi_unload(bp);
8645
8646         if (CHIP_REV_IS_FPGA(bp))
8647                 printk(KERN_ERR PFX "FPGA detected\n");
8648
8649         if (BP_NOMCP(bp) && (func == 0))
8650                 printk(KERN_ERR PFX
8651                        "MCP disabled, must load devices in order!\n");
8652
8653         /* Set multi queue mode */
8654         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8655             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8656                 printk(KERN_ERR PFX
8657                       "Multi disabled since int_mode requested is not MSI-X\n");
8658                 multi_mode = ETH_RSS_MODE_DISABLED;
8659         }
8660         bp->multi_mode = multi_mode;
8661
8662
8663         /* Set TPA flags */
8664         if (disable_tpa) {
8665                 bp->flags &= ~TPA_ENABLE_FLAG;
8666                 bp->dev->features &= ~NETIF_F_LRO;
8667         } else {
8668                 bp->flags |= TPA_ENABLE_FLAG;
8669                 bp->dev->features |= NETIF_F_LRO;
8670         }
8671
8672         bp->mrrs = mrrs;
8673
8674         bp->tx_ring_size = MAX_TX_AVAIL;
8675         bp->rx_ring_size = MAX_RX_AVAIL;
8676
8677         bp->rx_csum = 1;
8678
8679         bp->tx_ticks = 50;
8680         bp->rx_ticks = 25;
8681
8682         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8683         bp->current_interval = (poll ? poll : timer_interval);
8684
8685         init_timer(&bp->timer);
8686         bp->timer.expires = jiffies + bp->current_interval;
8687         bp->timer.data = (unsigned long) bp;
8688         bp->timer.function = bnx2x_timer;
8689
8690         return rc;
8691 }
8692
8693 /*
8694  * ethtool service functions
8695  */
8696
8697 /* All ethtool functions called with rtnl_lock */
8698
8699 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8700 {
8701         struct bnx2x *bp = netdev_priv(dev);
8702
8703         cmd->supported = bp->port.supported;
8704         cmd->advertising = bp->port.advertising;
8705
8706         if (netif_carrier_ok(dev)) {
8707                 cmd->speed = bp->link_vars.line_speed;
8708                 cmd->duplex = bp->link_vars.duplex;
8709         } else {
8710                 cmd->speed = bp->link_params.req_line_speed;
8711                 cmd->duplex = bp->link_params.req_duplex;
8712         }
8713         if (IS_E1HMF(bp)) {
8714                 u16 vn_max_rate;
8715
8716                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8717                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8718                 if (vn_max_rate < cmd->speed)
8719                         cmd->speed = vn_max_rate;
8720         }
8721
8722         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8723                 u32 ext_phy_type =
8724                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8725
8726                 switch (ext_phy_type) {
8727                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8728                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8729                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8730                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8731                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8732                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8733                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8734                         cmd->port = PORT_FIBRE;
8735                         break;
8736
8737                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8738                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8739                         cmd->port = PORT_TP;
8740                         break;
8741
8742                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8743                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8744                                   bp->link_params.ext_phy_config);
8745                         break;
8746
8747                 default:
8748                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8749                            bp->link_params.ext_phy_config);
8750                         break;
8751                 }
8752         } else
8753                 cmd->port = PORT_TP;
8754
8755         cmd->phy_address = bp->mdio.prtad;
8756         cmd->transceiver = XCVR_INTERNAL;
8757
8758         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8759                 cmd->autoneg = AUTONEG_ENABLE;
8760         else
8761                 cmd->autoneg = AUTONEG_DISABLE;
8762
8763         cmd->maxtxpkt = 0;
8764         cmd->maxrxpkt = 0;
8765
8766         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8767            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8768            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8769            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8770            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8771            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8772            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8773
8774         return 0;
8775 }
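
     /*
      * The FUNC_MF_CFG_MAX_BW field is in units of 100 Mbps, hence the
      * "* 100" above.  Illustrative example: a field value of 25 gives
      * vn_max_rate = 2500, so in multi-function mode a 10G link is
      * reported to ethtool as 2500 Mbps for that function.
      */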
8776
8777 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8778 {
8779         struct bnx2x *bp = netdev_priv(dev);
8780         u32 advertising;
8781
8782         if (IS_E1HMF(bp))
8783                 return 0;
8784
8785         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8786            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8787            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8788            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8789            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8790            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8791            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8792
8793         if (cmd->autoneg == AUTONEG_ENABLE) {
8794                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8795                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8796                         return -EINVAL;
8797                 }
8798
8799                 /* advertise the requested speed and duplex if supported */
8800                 cmd->advertising &= bp->port.supported;
8801
8802                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8803                 bp->link_params.req_duplex = DUPLEX_FULL;
8804                 bp->port.advertising |= (ADVERTISED_Autoneg |
8805                                          cmd->advertising);
8806
8807         } else { /* forced speed */
8808                 /* advertise the requested speed and duplex if supported */
8809                 switch (cmd->speed) {
8810                 case SPEED_10:
8811                         if (cmd->duplex == DUPLEX_FULL) {
8812                                 if (!(bp->port.supported &
8813                                       SUPPORTED_10baseT_Full)) {
8814                                         DP(NETIF_MSG_LINK,
8815                                            "10M full not supported\n");
8816                                         return -EINVAL;
8817                                 }
8818
8819                                 advertising = (ADVERTISED_10baseT_Full |
8820                                                ADVERTISED_TP);
8821                         } else {
8822                                 if (!(bp->port.supported &
8823                                       SUPPORTED_10baseT_Half)) {
8824                                         DP(NETIF_MSG_LINK,
8825                                            "10M half not supported\n");
8826                                         return -EINVAL;
8827                                 }
8828
8829                                 advertising = (ADVERTISED_10baseT_Half |
8830                                                ADVERTISED_TP);
8831                         }
8832                         break;
8833
8834                 case SPEED_100:
8835                         if (cmd->duplex == DUPLEX_FULL) {
8836                                 if (!(bp->port.supported &
8837                                                 SUPPORTED_100baseT_Full)) {
8838                                         DP(NETIF_MSG_LINK,
8839                                            "100M full not supported\n");
8840                                         return -EINVAL;
8841                                 }
8842
8843                                 advertising = (ADVERTISED_100baseT_Full |
8844                                                ADVERTISED_TP);
8845                         } else {
8846                                 if (!(bp->port.supported &
8847                                                 SUPPORTED_100baseT_Half)) {
8848                                         DP(NETIF_MSG_LINK,
8849                                            "100M half not supported\n");
8850                                         return -EINVAL;
8851                                 }
8852
8853                                 advertising = (ADVERTISED_100baseT_Half |
8854                                                ADVERTISED_TP);
8855                         }
8856                         break;
8857
8858                 case SPEED_1000:
8859                         if (cmd->duplex != DUPLEX_FULL) {
8860                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8861                                 return -EINVAL;
8862                         }
8863
8864                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8865                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8866                                 return -EINVAL;
8867                         }
8868
8869                         advertising = (ADVERTISED_1000baseT_Full |
8870                                        ADVERTISED_TP);
8871                         break;
8872
8873                 case SPEED_2500:
8874                         if (cmd->duplex != DUPLEX_FULL) {
8875                                 DP(NETIF_MSG_LINK,
8876                                    "2.5G half not supported\n");
8877                                 return -EINVAL;
8878                         }
8879
8880                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8881                                 DP(NETIF_MSG_LINK,
8882                                    "2.5G full not supported\n");
8883                                 return -EINVAL;
8884                         }
8885
8886                         advertising = (ADVERTISED_2500baseX_Full |
8887                                        ADVERTISED_TP);
8888                         break;
8889
8890                 case SPEED_10000:
8891                         if (cmd->duplex != DUPLEX_FULL) {
8892                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8893                                 return -EINVAL;
8894                         }
8895
8896                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8897                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8898                                 return -EINVAL;
8899                         }
8900
8901                         advertising = (ADVERTISED_10000baseT_Full |
8902                                        ADVERTISED_FIBRE);
8903                         break;
8904
8905                 default:
8906                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8907                         return -EINVAL;
8908                 }
8909
8910                 bp->link_params.req_line_speed = cmd->speed;
8911                 bp->link_params.req_duplex = cmd->duplex;
8912                 bp->port.advertising = advertising;
8913         }
8914
8915         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8916            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8917            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8918            bp->port.advertising);
8919
8920         if (netif_running(dev)) {
8921                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8922                 bnx2x_link_set(bp);
8923         }
8924
8925         return 0;
8926 }
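
     /*
      * Usage sketch (interface name illustrative): forcing 10G full
      * duplex from user space exercises the SPEED_10000 branch above:
      *
      *	ethtool -s eth0 speed 10000 duplex full autoneg off
      *
      * With autoneg enabled, only modes present in bp->port.supported
      * are accepted into the advertised set; in multi-function mode the
      * whole call is a silent no-op (see the IS_E1HMF check at the top).
      */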
8927
8928 #define PHY_FW_VER_LEN                  10
8929
8930 static void bnx2x_get_drvinfo(struct net_device *dev,
8931                               struct ethtool_drvinfo *info)
8932 {
8933         struct bnx2x *bp = netdev_priv(dev);
8934         u8 phy_fw_ver[PHY_FW_VER_LEN];
8935
8936         strcpy(info->driver, DRV_MODULE_NAME);
8937         strcpy(info->version, DRV_MODULE_VERSION);
8938
8939         phy_fw_ver[0] = '\0';
8940         if (bp->port.pmf) {
8941                 bnx2x_acquire_phy_lock(bp);
8942                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8943                                              (bp->state != BNX2X_STATE_CLOSED),
8944                                              phy_fw_ver, PHY_FW_VER_LEN);
8945                 bnx2x_release_phy_lock(bp);
8946         }
8947
8948         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8949                  (bp->common.bc_ver & 0xff0000) >> 16,
8950                  (bp->common.bc_ver & 0xff00) >> 8,
8951                  (bp->common.bc_ver & 0xff),
8952                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8953         strcpy(info->bus_info, pci_name(bp->pdev));
8954         info->n_stats = BNX2X_NUM_STATS;
8955         info->testinfo_len = BNX2X_NUM_TESTS;
8956         info->eedump_len = bp->common.flash_size;
8957         info->regdump_len = 0;
8958 }
8959
8960 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8961 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8962
8963 static int bnx2x_get_regs_len(struct net_device *dev)
8964 {
8965         static u32 regdump_len;
8966         struct bnx2x *bp = netdev_priv(dev);
8967         int i;
8968
8969         if (regdump_len)
8970                 return regdump_len;
8971
8972         if (CHIP_IS_E1(bp)) {
8973                 for (i = 0; i < REGS_COUNT; i++)
8974                         if (IS_E1_ONLINE(reg_addrs[i].info))
8975                                 regdump_len += reg_addrs[i].size;
8976
8977                 for (i = 0; i < WREGS_COUNT_E1; i++)
8978                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8979                                 regdump_len += wreg_addrs_e1[i].size *
8980                                         (1 + wreg_addrs_e1[i].read_regs_count);
8981
8982         } else { /* E1H */
8983                 for (i = 0; i < REGS_COUNT; i++)
8984                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8985                                 regdump_len += reg_addrs[i].size;
8986
8987                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8988                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8989                                 regdump_len += wreg_addrs_e1h[i].size *
8990                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8991         }
8992         regdump_len *= 4;
8993         regdump_len += sizeof(struct dump_hdr);
8994
8995         return regdump_len;
8996 }
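
     /*
      * Note: regdump_len is computed once and cached in a function-static
      * variable, so the (chip-dependent) length calculated for the first
      * bnx2x device queried is returned for all of them.  That only
      * matters in the presumably rare case of E1 and E1H boards mixed in
      * one system.
      */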
8997
8998 static void bnx2x_get_regs(struct net_device *dev,
8999                            struct ethtool_regs *regs, void *_p)
9000 {
9001         u32 *p = _p, i, j;
9002         struct bnx2x *bp = netdev_priv(dev);
9003         struct dump_hdr dump_hdr = {0};
9004
9005         regs->version = 0;
9006         memset(p, 0, regs->len);
9007
9008         if (!netif_running(bp->dev))
9009                 return;
9010
9011         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9012         dump_hdr.dump_sign = dump_sign_all;
9013         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9014         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9015         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9016         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9017         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9018
9019         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9020         p += dump_hdr.hdr_size + 1;
9021
9022         if (CHIP_IS_E1(bp)) {
9023                 for (i = 0; i < REGS_COUNT; i++)
9024                         if (IS_E1_ONLINE(reg_addrs[i].info))
9025                                 for (j = 0; j < reg_addrs[i].size; j++)
9026                                         *p++ = REG_RD(bp,
9027                                                       reg_addrs[i].addr + j*4);
9028
9029         } else { /* E1H */
9030                 for (i = 0; i < REGS_COUNT; i++)
9031                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9032                                 for (j = 0; j < reg_addrs[i].size; j++)
9033                                         *p++ = REG_RD(bp,
9034                                                       reg_addrs[i].addr + j*4);
9035         }
9036 }
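
     /*
      * Note: the buffer is laid out as a dump_hdr followed by the raw
      * 32-bit values of every online register block.  The length from
      * bnx2x_get_regs_len() also accounts for the wreg (read-repeat)
      * ranges, which are not emitted here; the surplus bytes simply stay
      * zero from the memset above.
      */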
9037
9038 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9039 {
9040         struct bnx2x *bp = netdev_priv(dev);
9041
9042         if (bp->flags & NO_WOL_FLAG) {
9043                 wol->supported = 0;
9044                 wol->wolopts = 0;
9045         } else {
9046                 wol->supported = WAKE_MAGIC;
9047                 if (bp->wol)
9048                         wol->wolopts = WAKE_MAGIC;
9049                 else
9050                         wol->wolopts = 0;
9051         }
9052         memset(&wol->sopass, 0, sizeof(wol->sopass));
9053 }
9054
9055 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9056 {
9057         struct bnx2x *bp = netdev_priv(dev);
9058
9059         if (wol->wolopts & ~WAKE_MAGIC)
9060                 return -EINVAL;
9061
9062         if (wol->wolopts & WAKE_MAGIC) {
9063                 if (bp->flags & NO_WOL_FLAG)
9064                         return -EINVAL;
9065
9066                 bp->wol = 1;
9067         } else
9068                 bp->wol = 0;
9069
9070         return 0;
9071 }
9072
9073 static u32 bnx2x_get_msglevel(struct net_device *dev)
9074 {
9075         struct bnx2x *bp = netdev_priv(dev);
9076
9077         return bp->msglevel;
9078 }
9079
9080 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9081 {
9082         struct bnx2x *bp = netdev_priv(dev);
9083
9084         if (capable(CAP_NET_ADMIN))
9085                 bp->msglevel = level;
9086 }
9087
9088 static int bnx2x_nway_reset(struct net_device *dev)
9089 {
9090         struct bnx2x *bp = netdev_priv(dev);
9091
9092         if (!bp->port.pmf)
9093                 return 0;
9094
9095         if (netif_running(dev)) {
9096                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9097                 bnx2x_link_set(bp);
9098         }
9099
9100         return 0;
9101 }
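
     /*
      * Only the port-management function (PMF) owns the link, so on a
      * non-PMF function nway_reset is a no-op.  On the PMF it restarts
      * the link (and autoneg) by re-applying the current link parameters.
      */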
9102
9103 static u32 bnx2x_get_link(struct net_device *dev)
9105 {
9106         struct bnx2x *bp = netdev_priv(dev);
9107
9108         return bp->link_vars.link_up;
9109 }
9110
9111 static int bnx2x_get_eeprom_len(struct net_device *dev)
9112 {
9113         struct bnx2x *bp = netdev_priv(dev);
9114
9115         return bp->common.flash_size;
9116 }
9117
9118 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9119 {
9120         int port = BP_PORT(bp);
9121         int count, i;
9122         u32 val = 0;
9123
9124         /* adjust timeout for emulation/FPGA */
9125         count = NVRAM_TIMEOUT_COUNT;
9126         if (CHIP_REV_IS_SLOW(bp))
9127                 count *= 100;
9128
9129         /* request access to nvram interface */
9130         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9131                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9132
9133         for (i = 0; i < count*10; i++) {
9134                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9135                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9136                         break;
9137
9138                 udelay(5);
9139         }
9140
9141         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9142                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9143                 return -EBUSY;
9144         }
9145
9146         return 0;
9147 }
9148
9149 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9150 {
9151         int port = BP_PORT(bp);
9152         int count, i;
9153         u32 val = 0;
9154
9155         /* adjust timeout for emulation/FPGA */
9156         count = NVRAM_TIMEOUT_COUNT;
9157         if (CHIP_REV_IS_SLOW(bp))
9158                 count *= 100;
9159
9160         /* relinquish nvram interface */
9161         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9162                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9163
9164         for (i = 0; i < count*10; i++) {
9165                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9166                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9167                         break;
9168
9169                 udelay(5);
9170         }
9171
9172         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9173                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9174                 return -EBUSY;
9175         }
9176
9177         return 0;
9178 }
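
     /*
      * The NVRAM is shared between the ports (and the MCP), so access is
      * serialized through the MCPR_NVM_SW_ARB register: a port sets its
      * REQ_SET bit and polls for its ARB grant bit, then clears the
      * request with REQ_CLR when done.  The poll budget is stretched
      * 100-fold on emulation/FPGA, where everything runs much slower.
      */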
9179
9180 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9181 {
9182         u32 val;
9183
9184         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9185
9186         /* enable both bits, even on read */
9187         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9188                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9189                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9190 }
9191
9192 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9193 {
9194         u32 val;
9195
9196         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9197
9198         /* disable both bits, even after read */
9199         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9200                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9201                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9202 }
9203
9204 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9205                                   u32 cmd_flags)
9206 {
9207         int count, i, rc;
9208         u32 val;
9209
9210         /* build the command word */
9211         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9212
9213         /* need to clear DONE bit separately */
9214         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9215
9216         /* address of the NVRAM to read from */
9217         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9218                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9219
9220         /* issue a read command */
9221         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9222
9223         /* adjust timeout for emulation/FPGA */
9224         count = NVRAM_TIMEOUT_COUNT;
9225         if (CHIP_REV_IS_SLOW(bp))
9226                 count *= 100;
9227
9228         /* wait for completion */
9229         *ret_val = 0;
9230         rc = -EBUSY;
9231         for (i = 0; i < count; i++) {
9232                 udelay(5);
9233                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9234
9235                 if (val & MCPR_NVM_COMMAND_DONE) {
9236                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9237                         /* we read nvram data in cpu order,
9238                          * but ethtool sees it as an array of bytes;
9239                          * converting to big-endian does the work */
9240                         *ret_val = cpu_to_be32(val);
9241                         rc = 0;
9242                         break;
9243                 }
9244         }
9245
9246         return rc;
9247 }
9248
9249 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9250                             int buf_size)
9251 {
9252         int rc;
9253         u32 cmd_flags;
9254         __be32 val;
9255
9256         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9257                 DP(BNX2X_MSG_NVM,
9258                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9259                    offset, buf_size);
9260                 return -EINVAL;
9261         }
9262
9263         if (offset + buf_size > bp->common.flash_size) {
9264                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9265                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9266                    offset, buf_size, bp->common.flash_size);
9267                 return -EINVAL;
9268         }
9269
9270         /* request access to nvram interface */
9271         rc = bnx2x_acquire_nvram_lock(bp);
9272         if (rc)
9273                 return rc;
9274
9275         /* enable access to nvram interface */
9276         bnx2x_enable_nvram_access(bp);
9277
9278         /* read the first word(s) */
9279         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9280         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9281                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9282                 memcpy(ret_buf, &val, 4);
9283
9284                 /* advance to the next dword */
9285                 offset += sizeof(u32);
9286                 ret_buf += sizeof(u32);
9287                 buf_size -= sizeof(u32);
9288                 cmd_flags = 0;
9289         }
9290
9291         if (rc == 0) {
9292                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9293                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9294                 memcpy(ret_buf, &val, 4);
9295         }
9296
9297         /* disable access to nvram interface */
9298         bnx2x_disable_nvram_access(bp);
9299         bnx2x_release_nvram_lock(bp);
9300
9301         return rc;
9302 }
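
     /*
      * Usage sketch (illustrative): reading the first four bytes of the
      * flash from a context that already serializes NVRAM users, e.g.
      * the ethtool paths under rtnl_lock:
      *
      *	u8 buf[4];
      *	int rc = bnx2x_nvram_read(bp, 0, buf, sizeof(buf));
      *
      * Both offset and buf_size must be dword-aligned and non-zero, as
      * checked above.
      */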
9303
9304 static int bnx2x_get_eeprom(struct net_device *dev,
9305                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9306 {
9307         struct bnx2x *bp = netdev_priv(dev);
9308         int rc;
9309
9310         if (!netif_running(dev))
9311                 return -EAGAIN;
9312
9313         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9314            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9315            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9316            eeprom->len, eeprom->len);
9317
9318         /* parameters already validated in ethtool_get_eeprom */
9319
9320         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9321
9322         return rc;
9323 }
9324
9325 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9326                                    u32 cmd_flags)
9327 {
9328         int count, i, rc;
9329
9330         /* build the command word */
9331         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9332
9333         /* need to clear DONE bit separately */
9334         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9335
9336         /* write the data */
9337         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9338
9339         /* address of the NVRAM to write to */
9340         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9341                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9342
9343         /* issue the write command */
9344         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9345
9346         /* adjust timeout for emulation/FPGA */
9347         count = NVRAM_TIMEOUT_COUNT;
9348         if (CHIP_REV_IS_SLOW(bp))
9349                 count *= 100;
9350
9351         /* wait for completion */
9352         rc = -EBUSY;
9353         for (i = 0; i < count; i++) {
9354                 udelay(5);
9355                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9356                 if (val & MCPR_NVM_COMMAND_DONE) {
9357                         rc = 0;
9358                         break;
9359                 }
9360         }
9361
9362         return rc;
9363 }
9364
9365 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
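
     /*
      * BYTE_OFFSET() turns a byte offset into the bit shift of that byte
      * within its naturally aligned dword: offsets ending in 0, 1, 2, 3
      * map to shifts 0, 8, 16, 24.  E.g. BYTE_OFFSET(0x1003) == 24,
      * selecting the byte lane that bnx2x_nvram_write1() below masks out
      * and replaces in its read-modify-write of the aligned dword.
      */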
9366
9367 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9368                               int buf_size)
9369 {
9370         int rc;
9371         u32 cmd_flags;
9372         u32 align_offset;
9373         __be32 val;
9374
9375         if (offset + buf_size > bp->common.flash_size) {
9376                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9377                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9378                    offset, buf_size, bp->common.flash_size);
9379                 return -EINVAL;
9380         }
9381
9382         /* request access to nvram interface */
9383         rc = bnx2x_acquire_nvram_lock(bp);
9384         if (rc)
9385                 return rc;
9386
9387         /* enable access to nvram interface */
9388         bnx2x_enable_nvram_access(bp);
9389
9390         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9391         align_offset = (offset & ~0x03);
9392         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9393
9394         if (rc == 0) {
9395                 val &= ~(0xff << BYTE_OFFSET(offset));
9396                 val |= (*data_buf << BYTE_OFFSET(offset));
9397
9398                 /* nvram data is returned as an array of bytes;
9399                  * convert it back to cpu order */
9400                 val = be32_to_cpu(val);
9401
9402                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9403                                              cmd_flags);
9404         }
9405
9406         /* disable access to nvram interface */
9407         bnx2x_disable_nvram_access(bp);
9408         bnx2x_release_nvram_lock(bp);
9409
9410         return rc;
9411 }
9412
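/* bnx2x_nvram_write1() above is a read-modify-write of one byte
 * inside an aligned dword.  A worked example: for offset 0x1F2,
 * align_offset = 0x1F0 and BYTE_OFFSET(0x1F2) = 8 * 2 = 16, so byte
 * lane 2 (bits 23:16) of the dword is cleared and replaced with
 * *data_buf before the dword is written back.
 */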
9413 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9414                              int buf_size)
9415 {
9416         int rc;
9417         u32 cmd_flags;
9418         u32 val;
9419         u32 written_so_far;
9420
9421         if (buf_size == 1)      /* ethtool */
9422                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9423
9424         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9425                 DP(BNX2X_MSG_NVM,
9426                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9427                    offset, buf_size);
9428                 return -EINVAL;
9429         }
9430
9431         if (offset + buf_size > bp->common.flash_size) {
9432                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9433                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9434                    offset, buf_size, bp->common.flash_size);
9435                 return -EINVAL;
9436         }
9437
9438         /* request access to nvram interface */
9439         rc = bnx2x_acquire_nvram_lock(bp);
9440         if (rc)
9441                 return rc;
9442
9443         /* enable access to nvram interface */
9444         bnx2x_enable_nvram_access(bp);
9445
9446         written_so_far = 0;
9447         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9448         while ((written_so_far < buf_size) && (rc == 0)) {
9449                 if (written_so_far == (buf_size - sizeof(u32)))
9450                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9451                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9452                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9453                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9454                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9455
9456                 memcpy(&val, data_buf, 4);
9457
9458                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9459
9460                 /* advance to the next dword */
9461                 offset += sizeof(u32);
9462                 data_buf += sizeof(u32);
9463                 written_so_far += sizeof(u32);
9464                 cmd_flags = 0;
9465         }
9466
9467         /* disable access to nvram interface */
9468         bnx2x_disable_nvram_access(bp);
9469         bnx2x_release_nvram_lock(bp);
9470
9471         return rc;
9472 }
9473
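/* The FIRST/LAST flags in the loop above delimit NVRAM bursts: FIRST
 * when a write starts the buffer or a new page, LAST when it ends the
 * buffer or hits a page boundary.  Assuming NVRAM_PAGE_SIZE is 256
 * bytes, a 16-byte write at offset 0xF8 is issued as two bursts:
 * 0xF8 (FIRST), 0xFC (LAST - page end), 0x100 (FIRST),
 * 0x104 (LAST - end of buffer).
 */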
9474 static int bnx2x_set_eeprom(struct net_device *dev,
9475                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9476 {
9477         struct bnx2x *bp = netdev_priv(dev);
9478         int port = BP_PORT(bp);
9479         int rc = 0;
9480
9481         if (!netif_running(dev))
9482                 return -EAGAIN;
9483
9484         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9485            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9486            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9487            eeprom->len, eeprom->len);
9488
9489         /* parameters already validated in ethtool_set_eeprom */
9490
9491         /* PHY eeprom can be accessed only by the PMF */
9492         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9493             !bp->port.pmf)
9494                 return -EINVAL;
9495
9496         if (eeprom->magic == 0x50485950) {
9497                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9498                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9499
9500                 bnx2x_acquire_phy_lock(bp);
9501                 rc |= bnx2x_link_reset(&bp->link_params,
9502                                        &bp->link_vars, 0);
9503                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9504                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9505                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9506                                        MISC_REGISTERS_GPIO_HIGH, port);
9507                 bnx2x_release_phy_lock(bp);
9508                 bnx2x_link_report(bp);
9509
9510         } else if (eeprom->magic == 0x50485952) {
9511                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9512                 if ((bp->state == BNX2X_STATE_OPEN) ||
9513                     (bp->state == BNX2X_STATE_DISABLED)) {
9514                         bnx2x_acquire_phy_lock(bp);
9515                         rc |= bnx2x_link_reset(&bp->link_params,
9516                                                &bp->link_vars, 1);
9517
9518                         rc |= bnx2x_phy_init(&bp->link_params,
9519                                              &bp->link_vars);
9520                         bnx2x_release_phy_lock(bp);
9521                         bnx2x_calc_fc_adv(bp);
9522                 }
9523         } else if (eeprom->magic == 0x50485943) {
9524                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9525                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9526                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9527                         u8 ext_phy_addr =
9528                                 (bp->link_params.ext_phy_config &
9529                                  PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9530                                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9531
9532                         /* DSP Remove Download Mode */
9533                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9534                                        MISC_REGISTERS_GPIO_LOW, port);
9535
9536                         bnx2x_acquire_phy_lock(bp);
9537
9538                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9539
9540                         /* wait 0.5 sec to allow it to run */
9541                         msleep(500);
9542                         bnx2x_ext_phy_hw_reset(bp, port);
9543                         msleep(500);
9544                         bnx2x_release_phy_lock(bp);
9545                 }
9546         } else
9547                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9548
9549         return rc;
9550 }
9551
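/* The 'PHY*' magic values sketch a user-space driven upgrade flow for
 * the external PHY firmware (sequence inferred from the handlers
 * above): 'PHYP' stops statistics and takes the link down before the
 * image is written with plain NVRAM writes, 'PHYC' resets the SFX7101
 * once the download completes, and 'PHYR' re-initializes the link.
 * Only the PMF may touch the PHY EEPROM, as checked above.
 */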
9552 static int bnx2x_get_coalesce(struct net_device *dev,
9553                               struct ethtool_coalesce *coal)
9554 {
9555         struct bnx2x *bp = netdev_priv(dev);
9556
9557         memset(coal, 0, sizeof(struct ethtool_coalesce));
9558
9559         coal->rx_coalesce_usecs = bp->rx_ticks;
9560         coal->tx_coalesce_usecs = bp->tx_ticks;
9561
9562         return 0;
9563 }
9564
9565 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9566 static int bnx2x_set_coalesce(struct net_device *dev,
9567                               struct ethtool_coalesce *coal)
9568 {
9569         struct bnx2x *bp = netdev_priv(dev);
9570
9571         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9572         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9573                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9574
9575         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9576         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9577                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9578
9579         if (netif_running(dev))
9580                 bnx2x_update_coalesce(bp);
9581
9582         return 0;
9583 }
9584
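/* Both timeouts are clamped to BNX2X_MAX_COALES_TOUT, i.e.
 * 0xf0 * 12 = 2880us.  Example usage (interface name assumed):
 *
 *   # ethtool -C eth0 rx-usecs 50 tx-usecs 50
 */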
9585 static void bnx2x_get_ringparam(struct net_device *dev,
9586                                 struct ethtool_ringparam *ering)
9587 {
9588         struct bnx2x *bp = netdev_priv(dev);
9589
9590         ering->rx_max_pending = MAX_RX_AVAIL;
9591         ering->rx_mini_max_pending = 0;
9592         ering->rx_jumbo_max_pending = 0;
9593
9594         ering->rx_pending = bp->rx_ring_size;
9595         ering->rx_mini_pending = 0;
9596         ering->rx_jumbo_pending = 0;
9597
9598         ering->tx_max_pending = MAX_TX_AVAIL;
9599         ering->tx_pending = bp->tx_ring_size;
9600 }
9601
9602 static int bnx2x_set_ringparam(struct net_device *dev,
9603                                struct ethtool_ringparam *ering)
9604 {
9605         struct bnx2x *bp = netdev_priv(dev);
9606         int rc = 0;
9607
9608         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9609             (ering->tx_pending > MAX_TX_AVAIL) ||
9610             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9611                 return -EINVAL;
9612
9613         bp->rx_ring_size = ering->rx_pending;
9614         bp->tx_ring_size = ering->tx_pending;
9615
9616         if (netif_running(dev)) {
9617                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9618                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9619         }
9620
9621         return rc;
9622 }
9623
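/* The Tx ring must hold more than MAX_SKB_FRAGS + 4 descriptors so a
 * maximally fragmented skb still fits.  Resizing reloads the NIC when
 * it is running, e.g. (interface name assumed):
 *
 *   # ethtool -G eth0 rx 2048 tx 2048
 */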
9624 static void bnx2x_get_pauseparam(struct net_device *dev,
9625                                  struct ethtool_pauseparam *epause)
9626 {
9627         struct bnx2x *bp = netdev_priv(dev);
9628
9629         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9630                            BNX2X_FLOW_CTRL_AUTO) &&
9631                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9632
9633         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9634                             BNX2X_FLOW_CTRL_RX);
9635         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9636                             BNX2X_FLOW_CTRL_TX);
9637
9638         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9639            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9640            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9641 }
9642
9643 static int bnx2x_set_pauseparam(struct net_device *dev,
9644                                 struct ethtool_pauseparam *epause)
9645 {
9646         struct bnx2x *bp = netdev_priv(dev);
9647
9648         if (IS_E1HMF(bp))
9649                 return 0;
9650
9651         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9652            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9653            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9654
9655         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9656
9657         if (epause->rx_pause)
9658                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9659
9660         if (epause->tx_pause)
9661                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9662
9663         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9664                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9665
9666         if (epause->autoneg) {
9667                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9668                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9669                         return -EINVAL;
9670                 }
9671
9672                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9673                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9674         }
9675
9676         DP(NETIF_MSG_LINK,
9677            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9678
9679         if (netif_running(dev)) {
9680                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9681                 bnx2x_link_set(bp);
9682         }
9683
9684         return 0;
9685 }
9686
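/* Flow control resolves to AUTO only when pause autoneg is requested
 * and the line speed is auto-negotiated too; otherwise the explicit
 * RX/TX bits (or NONE) are used.  Example (interface name assumed):
 *
 *   # ethtool -A eth0 autoneg on rx on tx on
 */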
9687 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9688 {
9689         struct bnx2x *bp = netdev_priv(dev);
9690         int changed = 0;
9691         int rc = 0;
9692
9693         /* TPA requires Rx CSUM offloading */
9694         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9695                 if (!(dev->features & NETIF_F_LRO)) {
9696                         dev->features |= NETIF_F_LRO;
9697                         bp->flags |= TPA_ENABLE_FLAG;
9698                         changed = 1;
9699                 }
9700
9701         } else if (dev->features & NETIF_F_LRO) {
9702                 dev->features &= ~NETIF_F_LRO;
9703                 bp->flags &= ~TPA_ENABLE_FLAG;
9704                 changed = 1;
9705         }
9706
9707         if (changed && netif_running(dev)) {
9708                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9709                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9710         }
9711
9712         return rc;
9713 }
9714
9715 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9716 {
9717         struct bnx2x *bp = netdev_priv(dev);
9718
9719         return bp->rx_csum;
9720 }
9721
9722 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9723 {
9724         struct bnx2x *bp = netdev_priv(dev);
9725         int rc = 0;
9726
9727         bp->rx_csum = data;
9728
9729         /* Disable TPA when Rx CSUM is disabled; otherwise all
9730            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9731         if (!data) {
9732                 u32 flags = ethtool_op_get_flags(dev);
9733
9734                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9735         }
9736
9737         return rc;
9738 }
9739
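/* Turning Rx checksumming off therefore implicitly drops LRO as
 * well.  Example (interface name assumed):
 *
 *   # ethtool -K eth0 rx off
 */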
9740 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9741 {
9742         if (data) {
9743                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9744                 dev->features |= NETIF_F_TSO6;
9745         } else {
9746                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9747                 dev->features &= ~NETIF_F_TSO6;
9748         }
9749
9750         return 0;
9751 }
9752
9753 static const struct {
9754         char string[ETH_GSTRING_LEN];
9755 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9756         { "register_test (offline)" },
9757         { "memory_test (offline)" },
9758         { "loopback_test (offline)" },
9759         { "nvram_test (online)" },
9760         { "interrupt_test (online)" },
9761         { "link_test (online)" },
9762         { "idle check (online)" }
9763 };
9764
9765 static int bnx2x_self_test_count(struct net_device *dev)
9766 {
9767         return BNX2X_NUM_TESTS;
9768 }
9769
9770 static int bnx2x_test_registers(struct bnx2x *bp)
9771 {
9772         int idx, i, rc = -ENODEV;
9773         u32 wr_val = 0;
9774         int port = BP_PORT(bp);
9775         static const struct {
9776                 u32  offset0;
9777                 u32  offset1;
9778                 u32  mask;
9779         } reg_tbl[] = {
9780 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9781                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9782                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9783                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9784                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9785                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9786                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9787                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9788                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9789                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9790 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9791                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9792                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9793                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9794                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9795                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9796                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9797                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9798                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9799                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9800 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9801                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9802                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9803                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9804                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9805                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9806                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9807                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9808                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9809                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9810 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9811                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9812                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9813                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9814                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9815                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9816                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9817
9818                 { 0xffffffff, 0, 0x00000000 }
9819         };
9820
9821         if (!netif_running(bp->dev))
9822                 return rc;
9823
9824         /* Run the test twice:
9825            first writing 0x00000000, then writing 0xffffffff */
9826         for (idx = 0; idx < 2; idx++) {
9827
9828                 switch (idx) {
9829                 case 0:
9830                         wr_val = 0;
9831                         break;
9832                 case 1:
9833                         wr_val = 0xffffffff;
9834                         break;
9835                 }
9836
9837                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9838                         u32 offset, mask, save_val, val;
9839
9840                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9841                         mask = reg_tbl[i].mask;
9842
9843                         save_val = REG_RD(bp, offset);
9844
9845                         REG_WR(bp, offset, wr_val);
9846                         val = REG_RD(bp, offset);
9847
9848                         /* Restore the original register's value */
9849                         REG_WR(bp, offset, save_val);
9850
9851                         /* verify the value read back is as expected */
9852                         if ((val & mask) != (wr_val & mask))
9853                                 goto test_reg_exit;
9854                 }
9855         }
9856
9857         rc = 0;
9858
9859 test_reg_exit:
9860         return rc;
9861 }
9862
9863 static int bnx2x_test_memory(struct bnx2x *bp)
9864 {
9865         int i, j, rc = -ENODEV;
9866         u32 val;
9867         static const struct {
9868                 u32 offset;
9869                 int size;
9870         } mem_tbl[] = {
9871                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9872                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9873                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9874                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9875                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9876                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9877                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9878
9879                 { 0xffffffff, 0 }
9880         };
9881         static const struct {
9882                 char *name;
9883                 u32 offset;
9884                 u32 e1_mask;
9885                 u32 e1h_mask;
9886         } prty_tbl[] = {
9887                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9888                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9889                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9890                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9891                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9892                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9893
9894                 { NULL, 0xffffffff, 0, 0 }
9895         };
9896
9897         if (!netif_running(bp->dev))
9898                 return rc;
9899
9900         /* Go through all the memories */
9901         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9902                 for (j = 0; j < mem_tbl[i].size; j++)
9903                         REG_RD(bp, mem_tbl[i].offset + j*4);
9904
9905         /* Check the parity status */
9906         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9907                 val = REG_RD(bp, prty_tbl[i].offset);
9908                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9909                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9910                         DP(NETIF_MSG_HW,
9911                            "%s is 0x%x\n", prty_tbl[i].name, val);
9912                         goto test_mem_exit;
9913                 }
9914         }
9915
9916         rc = 0;
9917
9918 test_mem_exit:
9919         return rc;
9920 }
9921
9922 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9923 {
9924         int cnt = 1000;
9925
9926         if (link_up)
9927                 while (bnx2x_link_test(bp) && cnt--)
9928                         msleep(10);
9929 }
9930
9931 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9932 {
9933         unsigned int pkt_size, num_pkts, i;
9934         struct sk_buff *skb;
9935         unsigned char *packet;
9936         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9937         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9938         u16 tx_start_idx, tx_idx;
9939         u16 rx_start_idx, rx_idx;
9940         u16 pkt_prod, bd_prod;
9941         struct sw_tx_bd *tx_buf;
9942         struct eth_tx_start_bd *tx_start_bd;
9943         struct eth_tx_parse_bd *pbd = NULL;
9944         dma_addr_t mapping;
9945         union eth_rx_cqe *cqe;
9946         u8 cqe_fp_flags;
9947         struct sw_rx_bd *rx_buf;
9948         u16 len;
9949         int rc = -ENODEV;
9950
9951         /* check the loopback mode */
9952         switch (loopback_mode) {
9953         case BNX2X_PHY_LOOPBACK:
9954                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9955                         return -EINVAL;
9956                 break;
9957         case BNX2X_MAC_LOOPBACK:
9958                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9959                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9960                 break;
9961         default:
9962                 return -EINVAL;
9963         }
9964
9965         /* prepare the loopback packet */
9966         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9967                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9968         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9969         if (!skb) {
9970                 rc = -ENOMEM;
9971                 goto test_loopback_exit;
9972         }
9973         packet = skb_put(skb, pkt_size);
9974         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9975         memset(packet + ETH_ALEN, 0, ETH_ALEN);
9976         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9977         for (i = ETH_HLEN; i < pkt_size; i++)
9978                 packet[i] = (unsigned char) (i & 0xff);
9979
9980         /* send the loopback packet */
9981         num_pkts = 0;
9982         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9983         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9984
9985         pkt_prod = fp_tx->tx_pkt_prod++;
9986         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9987         tx_buf->first_bd = fp_tx->tx_bd_prod;
9988         tx_buf->skb = skb;
9989         tx_buf->flags = 0;
9990
9991         bd_prod = TX_BD(fp_tx->tx_bd_prod);
9992         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9993         mapping = pci_map_single(bp->pdev, skb->data,
9994                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9995         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9996         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9997         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9998         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9999         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10000         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10001         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10002                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10003
10004         /* turn on parsing and get a BD */
10005         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10006         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10007
10008         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10009
10010         wmb();
10011
10012         fp_tx->tx_db.data.prod += 2;
10013         barrier();
10014         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10015
10016         mmiowb();
10017
10018         num_pkts++;
10019         fp_tx->tx_bd_prod += 2; /* start + pbd */
10020         bp->dev->trans_start = jiffies;
10021
10022         udelay(100);
10023
10024         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10025         if (tx_idx != tx_start_idx + num_pkts)
10026                 goto test_loopback_exit;
10027
10028         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10029         if (rx_idx != rx_start_idx + num_pkts)
10030                 goto test_loopback_exit;
10031
10032         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10033         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10034         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10035                 goto test_loopback_rx_exit;
10036
10037         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10038         if (len != pkt_size)
10039                 goto test_loopback_rx_exit;
10040
10041         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10042         skb = rx_buf->skb;
10043         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10044         for (i = ETH_HLEN; i < pkt_size; i++)
10045                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10046                         goto test_loopback_rx_exit;
10047
10048         rc = 0;
10049
10050 test_loopback_rx_exit:
10051
10052         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10053         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10054         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10055         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10056
10057         /* Update producers */
10058         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10059                              fp_rx->rx_sge_prod);
10060
10061 test_loopback_exit:
10062         bp->link_params.loopback_mode = LOOPBACK_NONE;
10063
10064         return rc;
10065 }
10066
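/* The loopback test above builds one self-addressed frame, posts it
 * as a start BD plus an empty parsing BD (hence nbd == 2 and the
 * producers advancing by 2), rings the Tx doorbell and then verifies
 * that both consumer indices advanced by one packet and that the
 * received payload matches the pattern that was written.
 */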
10067 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10068 {
10069         int rc = 0, res;
10070
10071         if (!netif_running(bp->dev))
10072                 return BNX2X_LOOPBACK_FAILED;
10073
10074         bnx2x_netif_stop(bp, 1);
10075         bnx2x_acquire_phy_lock(bp);
10076
10077         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10078         if (res) {
10079                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10080                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10081         }
10082
10083         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10084         if (res) {
10085                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10086                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10087         }
10088
10089         bnx2x_release_phy_lock(bp);
10090         bnx2x_netif_start(bp);
10091
10092         return rc;
10093 }
10094
10095 #define CRC32_RESIDUAL                  0xdebb20e3
10096
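/* CRC-32 self-check: when a buffer carries its own CRC32 appended at
 * the end (as the NVRAM areas checked below apparently do), running
 * ether_crc_le() over data plus checksum yields the constant residual
 * 0xdebb20e3 for an intact image, so no expected value has to be
 * stored separately.
 */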
10097 static int bnx2x_test_nvram(struct bnx2x *bp)
10098 {
10099         static const struct {
10100                 int offset;
10101                 int size;
10102         } nvram_tbl[] = {
10103                 {     0,  0x14 }, /* bootstrap */
10104                 {  0x14,  0xec }, /* dir */
10105                 { 0x100, 0x350 }, /* manuf_info */
10106                 { 0x450,  0xf0 }, /* feature_info */
10107                 { 0x640,  0x64 }, /* upgrade_key_info */
10108                 { 0x6a4,  0x64 },
10109                 { 0x708,  0x70 }, /* manuf_key_info */
10110                 { 0x778,  0x70 },
10111                 {     0,     0 }
10112         };
10113         __be32 buf[0x350 / 4];
10114         u8 *data = (u8 *)buf;
10115         int i, rc;
10116         u32 magic, csum;
10117
10118         rc = bnx2x_nvram_read(bp, 0, data, 4);
10119         if (rc) {
10120                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10121                 goto test_nvram_exit;
10122         }
10123
10124         magic = be32_to_cpu(buf[0]);
10125         if (magic != 0x669955aa) {
10126                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10127                 rc = -ENODEV;
10128                 goto test_nvram_exit;
10129         }
10130
10131         for (i = 0; nvram_tbl[i].size; i++) {
10132
10133                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10134                                       nvram_tbl[i].size);
10135                 if (rc) {
10136                         DP(NETIF_MSG_PROBE,
10137                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10138                         goto test_nvram_exit;
10139                 }
10140
10141                 csum = ether_crc_le(nvram_tbl[i].size, data);
10142                 if (csum != CRC32_RESIDUAL) {
10143                         DP(NETIF_MSG_PROBE,
10144                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10145                         rc = -ENODEV;
10146                         goto test_nvram_exit;
10147                 }
10148         }
10149
10150 test_nvram_exit:
10151         return rc;
10152 }
10153
10154 static int bnx2x_test_intr(struct bnx2x *bp)
10155 {
10156         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10157         int i, rc;
10158
10159         if (!netif_running(bp->dev))
10160                 return -ENODEV;
10161
10162         config->hdr.length = 0;
10163         if (CHIP_IS_E1(bp))
10164                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10165         else
10166                 config->hdr.offset = BP_FUNC(bp);
10167         config->hdr.client_id = bp->fp->cl_id;
10168         config->hdr.reserved1 = 0;
10169
10170         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10171                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10172                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10173         if (rc == 0) {
10174                 bp->set_mac_pending++;
10175                 for (i = 0; i < 10; i++) {
10176                         if (!bp->set_mac_pending)
10177                                 break;
10178                         msleep_interruptible(10);
10179                 }
10180                 if (i == 10)
10181                         rc = -ENODEV;
10182         }
10183
10184         return rc;
10185 }
10186
10187 static void bnx2x_self_test(struct net_device *dev,
10188                             struct ethtool_test *etest, u64 *buf)
10189 {
10190         struct bnx2x *bp = netdev_priv(dev);
10191
10192         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10193
10194         if (!netif_running(dev))
10195                 return;
10196
10197         /* offline tests are not supported in MF mode */
10198         if (IS_E1HMF(bp))
10199                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10200
10201         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10202                 int port = BP_PORT(bp);
10203                 u32 val;
10204                 u8 link_up;
10205
10206                 /* save current value of input enable for TX port IF */
10207                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10208                 /* disable input for TX port IF */
10209                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10210
10211                 link_up = bp->link_vars.link_up;
10212                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10213                 bnx2x_nic_load(bp, LOAD_DIAG);
10214                 /* wait until link state is restored */
10215                 bnx2x_wait_for_link(bp, link_up);
10216
10217                 if (bnx2x_test_registers(bp) != 0) {
10218                         buf[0] = 1;
10219                         etest->flags |= ETH_TEST_FL_FAILED;
10220                 }
10221                 if (bnx2x_test_memory(bp) != 0) {
10222                         buf[1] = 1;
10223                         etest->flags |= ETH_TEST_FL_FAILED;
10224                 }
10225                 buf[2] = bnx2x_test_loopback(bp, link_up);
10226                 if (buf[2] != 0)
10227                         etest->flags |= ETH_TEST_FL_FAILED;
10228
10229                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10230
10231                 /* restore input for TX port IF */
10232                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10233
10234                 bnx2x_nic_load(bp, LOAD_NORMAL);
10235                 /* wait until link state is restored */
10236                 bnx2x_wait_for_link(bp, link_up);
10237         }
10238         if (bnx2x_test_nvram(bp) != 0) {
10239                 buf[3] = 1;
10240                 etest->flags |= ETH_TEST_FL_FAILED;
10241         }
10242         if (bnx2x_test_intr(bp) != 0) {
10243                 buf[4] = 1;
10244                 etest->flags |= ETH_TEST_FL_FAILED;
10245         }
10246         if (bp->port.pmf)
10247                 if (bnx2x_link_test(bp) != 0) {
10248                         buf[5] = 1;
10249                         etest->flags |= ETH_TEST_FL_FAILED;
10250                 }
10251
10252 #ifdef BNX2X_EXTRA_DEBUG
10253         bnx2x_panic_dump(bp);
10254 #endif
10255 }
10256
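/* Running the full self-test from user space (interface name
 * assumed):
 *
 *   # ethtool -t eth0 offline
 *
 * Offline adds the register/memory/loopback tests (reloading the NIC
 * in diagnostic mode); nvram, interrupt and link tests also run
 * online.  Results land in buf[] in bnx2x_tests_str_arr order.
 */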
10257 static const struct {
10258         long offset;
10259         int size;
10260         u8 string[ETH_GSTRING_LEN];
10261 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10262 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10263         { Q_STATS_OFFSET32(error_bytes_received_hi),
10264                                                 8, "[%d]: rx_error_bytes" },
10265         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10266                                                 8, "[%d]: rx_ucast_packets" },
10267         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10268                                                 8, "[%d]: rx_mcast_packets" },
10269         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10270                                                 8, "[%d]: rx_bcast_packets" },
10271         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10272         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10273                                          4, "[%d]: rx_phy_ip_err_discards"},
10274         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10275                                          4, "[%d]: rx_skb_alloc_discard" },
10276         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10277
10278 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10279         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10280                                                         8, "[%d]: tx_packets" }
10281 };
10282
10283 static const struct {
10284         long offset;
10285         int size;
10286         u32 flags;
10287 #define STATS_FLAGS_PORT                1
10288 #define STATS_FLAGS_FUNC                2
10289 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10290         u8 string[ETH_GSTRING_LEN];
10291 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10292 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10293                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10294         { STATS_OFFSET32(error_bytes_received_hi),
10295                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10296         { STATS_OFFSET32(total_unicast_packets_received_hi),
10297                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10298         { STATS_OFFSET32(total_multicast_packets_received_hi),
10299                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10300         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10301                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10302         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10303                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10304         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10305                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10306         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10307                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10308         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10309                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10310 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10311                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10312         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10313                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10314         { STATS_OFFSET32(no_buff_discard_hi),
10315                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10316         { STATS_OFFSET32(mac_filter_discard),
10317                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10318         { STATS_OFFSET32(xxoverflow_discard),
10319                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10320         { STATS_OFFSET32(brb_drop_hi),
10321                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10322         { STATS_OFFSET32(brb_truncate_hi),
10323                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10324         { STATS_OFFSET32(pause_frames_received_hi),
10325                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10326         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10327                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10328         { STATS_OFFSET32(nig_timer_max),
10329                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10330 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10331                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10332         { STATS_OFFSET32(rx_skb_alloc_failed),
10333                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10334         { STATS_OFFSET32(hw_csum_err),
10335                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10336
10337         { STATS_OFFSET32(total_bytes_transmitted_hi),
10338                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10339         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10340                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10341         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10342                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10343         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10344                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10345         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10346                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10347         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10348                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10349         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10350                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10351 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10352                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10353         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10354                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10355         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10356                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10357         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10358                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10359         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10360                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10361         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10362                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10363         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10364                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10365         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10366                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10367         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10368                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10369         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10370                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10371 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10372                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10373         { STATS_OFFSET32(pause_frames_sent_hi),
10374                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10375 };
10376
10377 #define IS_PORT_STAT(i) \
10378         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10379 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10380 #define IS_E1HMF_MODE_STAT(bp) \
10381                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10382
10383 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10384 {
10385         struct bnx2x *bp = netdev_priv(dev);
10386         int i, j, k;
10387
10388         switch (stringset) {
10389         case ETH_SS_STATS:
10390                 if (is_multi(bp)) {
10391                         k = 0;
10392                         for_each_rx_queue(bp, i) {
10393                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10394                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10395                                                 bnx2x_q_stats_arr[j].string, i);
10396                                 k += BNX2X_NUM_Q_STATS;
10397                         }
10398                         if (IS_E1HMF_MODE_STAT(bp))
10399                                 break;
10400                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10401                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10402                                        bnx2x_stats_arr[j].string);
10403                 } else {
10404                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10405                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10406                                         continue;
10407                                 strcpy(buf + j*ETH_GSTRING_LEN,
10408                                        bnx2x_stats_arr[i].string);
10409                                 j++;
10410                         }
10411                 }
10412                 break;
10413
10414         case ETH_SS_TEST:
10415                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10416                 break;
10417         }
10418 }
10419
10420 static int bnx2x_get_stats_count(struct net_device *dev)
10421 {
10422         struct bnx2x *bp = netdev_priv(dev);
10423         int i, num_stats;
10424
10425         if (is_multi(bp)) {
10426                 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10427                 if (!IS_E1HMF_MODE_STAT(bp))
10428                         num_stats += BNX2X_NUM_STATS;
10429         } else {
10430                 if (IS_E1HMF_MODE_STAT(bp)) {
10431                         num_stats = 0;
10432                         for (i = 0; i < BNX2X_NUM_STATS; i++)
10433                                 if (IS_FUNC_STAT(i))
10434                                         num_stats++;
10435                 } else
10436                         num_stats = BNX2X_NUM_STATS;
10437         }
10438
10439         return num_stats;
10440 }
10441
10442 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10443                                     struct ethtool_stats *stats, u64 *buf)
10444 {
10445         struct bnx2x *bp = netdev_priv(dev);
10446         u32 *hw_stats, *offset;
10447         int i, j, k;
10448
10449         if (is_multi(bp)) {
10450                 k = 0;
10451                 for_each_rx_queue(bp, i) {
10452                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10453                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10454                                 if (bnx2x_q_stats_arr[j].size == 0) {
10455                                         /* skip this counter */
10456                                         buf[k + j] = 0;
10457                                         continue;
10458                                 }
10459                                 offset = (hw_stats +
10460                                           bnx2x_q_stats_arr[j].offset);
10461                                 if (bnx2x_q_stats_arr[j].size == 4) {
10462                                         /* 4-byte counter */
10463                                         buf[k + j] = (u64) *offset;
10464                                         continue;
10465                                 }
10466                                 /* 8-byte counter */
10467                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10468                         }
10469                         k += BNX2X_NUM_Q_STATS;
10470                 }
10471                 if (IS_E1HMF_MODE_STAT(bp))
10472                         return;
10473                 hw_stats = (u32 *)&bp->eth_stats;
10474                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10475                         if (bnx2x_stats_arr[j].size == 0) {
10476                                 /* skip this counter */
10477                                 buf[k + j] = 0;
10478                                 continue;
10479                         }
10480                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10481                         if (bnx2x_stats_arr[j].size == 4) {
10482                                 /* 4-byte counter */
10483                                 buf[k + j] = (u64) *offset;
10484                                 continue;
10485                         }
10486                         /* 8-byte counter */
10487                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10488                 }
10489         } else {
10490                 hw_stats = (u32 *)&bp->eth_stats;
10491                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10492                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10493                                 continue;
10494                         if (bnx2x_stats_arr[i].size == 0) {
10495                                 /* skip this counter */
10496                                 buf[j] = 0;
10497                                 j++;
10498                                 continue;
10499                         }
10500                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10501                         if (bnx2x_stats_arr[i].size == 4) {
10502                                 /* 4-byte counter */
10503                                 buf[j] = (u64) *offset;
10504                                 j++;
10505                                 continue;
10506                         }
10507                         /* 8-byte counter */
10508                         buf[j] = HILO_U64(*offset, *(offset + 1));
10509                         j++;
10510                 }
10511         }
10512 }
10513
10514 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10515 {
10516         struct bnx2x *bp = netdev_priv(dev);
10517         int port = BP_PORT(bp);
10518         int i;
10519
10520         if (!netif_running(dev))
10521                 return 0;
10522
10523         if (!bp->port.pmf)
10524                 return 0;
10525
10526         if (data == 0)
10527                 data = 2;
10528
10529         for (i = 0; i < (data * 2); i++) {
10530                 if ((i % 2) == 0)
10531                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10532                                       bp->link_params.hw_led_mode,
10533                                       bp->link_params.chip_id);
10534                 else
10535                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10536                                       bp->link_params.hw_led_mode,
10537                                       bp->link_params.chip_id);
10538
10539                 msleep_interruptible(500);
10540                 if (signal_pending(current))
10541                         break;
10542         }
10543
10544         if (bp->link_vars.link_up)
10545                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10546                               bp->link_vars.line_speed,
10547                               bp->link_params.hw_led_mode,
10548                               bp->link_params.chip_id);
10549
10550         return 0;
10551 }
10552
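/* LED identify from user space, e.g. blink for 5 seconds (interface
 * name assumed):
 *
 *   # ethtool -p eth0 5
 *
 * Each unit of "data" toggles the LED twice with 500ms sleeps, so it
 * maps roughly to seconds of blinking.
 */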
10553 static struct ethtool_ops bnx2x_ethtool_ops = {
10554         .get_settings           = bnx2x_get_settings,
10555         .set_settings           = bnx2x_set_settings,
10556         .get_drvinfo            = bnx2x_get_drvinfo,
10557         .get_regs_len           = bnx2x_get_regs_len,
10558         .get_regs               = bnx2x_get_regs,
10559         .get_wol                = bnx2x_get_wol,
10560         .set_wol                = bnx2x_set_wol,
10561         .get_msglevel           = bnx2x_get_msglevel,
10562         .set_msglevel           = bnx2x_set_msglevel,
10563         .nway_reset             = bnx2x_nway_reset,
10564         .get_link               = bnx2x_get_link,
10565         .get_eeprom_len         = bnx2x_get_eeprom_len,
10566         .get_eeprom             = bnx2x_get_eeprom,
10567         .set_eeprom             = bnx2x_set_eeprom,
10568         .get_coalesce           = bnx2x_get_coalesce,
10569         .set_coalesce           = bnx2x_set_coalesce,
10570         .get_ringparam          = bnx2x_get_ringparam,
10571         .set_ringparam          = bnx2x_set_ringparam,
10572         .get_pauseparam         = bnx2x_get_pauseparam,
10573         .set_pauseparam         = bnx2x_set_pauseparam,
10574         .get_rx_csum            = bnx2x_get_rx_csum,
10575         .set_rx_csum            = bnx2x_set_rx_csum,
10576         .get_tx_csum            = ethtool_op_get_tx_csum,
10577         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10578         .set_flags              = bnx2x_set_flags,
10579         .get_flags              = ethtool_op_get_flags,
10580         .get_sg                 = ethtool_op_get_sg,
10581         .set_sg                 = ethtool_op_set_sg,
10582         .get_tso                = ethtool_op_get_tso,
10583         .set_tso                = bnx2x_set_tso,
10584         .self_test_count        = bnx2x_self_test_count,
10585         .self_test              = bnx2x_self_test,
10586         .get_strings            = bnx2x_get_strings,
10587         .phys_id                = bnx2x_phys_id,
10588         .get_stats_count        = bnx2x_get_stats_count,
10589         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10590 };
10591
10592 /* end of ethtool_ops */
10593
10594 /****************************************************************************
10595 * General service functions
10596 ****************************************************************************/
10597
10598 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10599 {
10600         u16 pmcsr;
10601
10602         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10603
10604         switch (state) {
10605         case PCI_D0:
10606                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10607                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10608                                        PCI_PM_CTRL_PME_STATUS));
10609
10610                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10611                         /* delay required during transition out of D3hot */
10612                         msleep(20);
10613                 break;
10614
10615         case PCI_D3hot:
10616                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10617                 pmcsr |= 3;     /* power state D3hot */
10618
10619                 if (bp->wol)
10620                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10621
10622                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10623                                       pmcsr);
10624
10625                 /* No more memory access after this point until
10626                  * the device is brought back to D0.
10627                  */
10628                 break;
10629
10630         default:
10631                 return -EINVAL;
10632         }
10633         return 0;
10634 }
10635
10636 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10637 {
10638         u16 rx_cons_sb;
10639
10640         /* Tell compiler that status block fields can change */
10641         barrier();
10642         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10643         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10644                 rx_cons_sb++;
10645         return (fp->rx_comp_cons != rx_cons_sb);
10646 }
10647
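/* The adjustment above accounts for the last entry of each RCQ page
 * being reserved as a next-page pointer rather than a real
 * completion: when the status-block consumer lands on it, the local
 * copy is bumped past it before comparing against rx_comp_cons.
 */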
10648 /*
10649  * net_device service functions
10650  */
10651
10652 static int bnx2x_poll(struct napi_struct *napi, int budget)
10653 {
10654         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10655                                                  napi);
10656         struct bnx2x *bp = fp->bp;
10657         int work_done = 0;
10658
10659 #ifdef BNX2X_STOP_ON_ERROR
10660         if (unlikely(bp->panic))
10661                 goto poll_panic;
10662 #endif
10663
10664         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10665         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10666
10667         bnx2x_update_fpsb_idx(fp);
10668
10669         if (bnx2x_has_rx_work(fp)) {
10670                 work_done = bnx2x_rx_int(fp, budget);
10671
10672                 /* must not complete if we consumed full budget */
10673                 if (work_done >= budget)
10674                         goto poll_again;
10675         }
10676
10677         /* bnx2x_has_rx_work() reads the status block, so we must ensure
10678          * that the status block indices have actually been read
10679          * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work);
10680          * otherwise we could write a stale "newer" value of the status
10681          * block to the IGU.  Without the rmb, the read in
10682          * bnx2x_update_fpsb_idx may be postponed until right before
10683          * bnx2x_ack_sb; if a DMA occurred right after bnx2x_has_rx_work,
10684          * no further interrupt would be generated until the next status
10685          * block update, even though unhandled work remains.
10686          */
10687         rmb();
10688
10689         if (!bnx2x_has_rx_work(fp)) {
10690 #ifdef BNX2X_STOP_ON_ERROR
10691 poll_panic:
10692 #endif
10693                 napi_complete(napi);
10694
10695                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10696                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10697                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10698                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10699         }
10700
10701 poll_again:
10702         return work_done;
10703 }
10704
10705
10706 /* We split the first BD into a headers BD and a data BD
10707  * to ease the pain of our fellow microcode engineers;
10708  * we use one DMA mapping for both BDs.
10709  * So far this has only been observed to happen
10710  * in Other Operating Systems(TM).
10711  */
10712 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10713                                    struct bnx2x_fastpath *fp,
10714                                    struct sw_tx_bd *tx_buf,
10715                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10716                                    u16 bd_prod, int nbd)
10717 {
10718         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10719         struct eth_tx_bd *d_tx_bd;
10720         dma_addr_t mapping;
10721         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10722
10723         /* first fix first BD */
10724         h_tx_bd->nbd = cpu_to_le16(nbd);
10725         h_tx_bd->nbytes = cpu_to_le16(hlen);
10726
10727         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10728            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10729            h_tx_bd->addr_lo, h_tx_bd->nbd);
10730
10731         /* now get a new data BD
10732          * (after the pbd) and fill it */
10733         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10734         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10735
10736         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10737                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10738
10739         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10740         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10741         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10742
10743         /* this marks the BD as one that has no individual mapping */
10744         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10745
10746         DP(NETIF_MSG_TX_QUEUED,
10747            "TSO split data size is %d (%x:%x)\n",
10748            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10749
10750         /* update tx_bd */
10751         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10752
10753         return bd_prod;
10754 }
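/* Editorial sketch of the split performed above (one DMA mapping
 * shared by two BDs; sizes illustrative):
 *
 *     before:  [ start BD: nbytes == old_len        @ mapping        ]
 *     after:   [ start BD: nbytes == hlen           @ mapping        ]
 *              [ data BD:  nbytes == old_len - hlen @ mapping + hlen ]
 *
 * The BNX2X_TSO_SPLIT_BD flag records that the data BD has no mapping
 * of its own, so completion handling unmaps only once.
 */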
10755
10756 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10757 {
10758         if (fix > 0)
10759                 csum = (u16) ~csum_fold(csum_sub(csum,
10760                                 csum_partial(t_header - fix, fix, 0)));
10761
10762         else if (fix < 0)
10763                 csum = (u16) ~csum_fold(csum_add(csum,
10764                                 csum_partial(t_header, -fix, 0)));
10765
10766         return swab16(csum);
10767 }
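/* Editorial sketch of the fixup above, with hypothetical values:
 * if the stack started summing 2 bytes before the transport header
 * (fix == 2), those bytes are removed from the sum before folding:
 *
 *     csum = (u16) ~csum_fold(csum_sub(csum,
 *                             csum_partial(t_header - 2, 2, 0)));
 *     return swab16(csum);    // BD expects the swapped byte order
 *
 * A negative fix means the sum started after the transport header,
 * so the missing bytes are added back with csum_add() instead.
 */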
10768
10769 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10770 {
10771         u32 rc;
10772
10773         if (skb->ip_summed != CHECKSUM_PARTIAL)
10774                 rc = XMIT_PLAIN;
10775
10776         else {
10777                 if (skb->protocol == htons(ETH_P_IPV6)) {
10778                         rc = XMIT_CSUM_V6;
10779                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10780                                 rc |= XMIT_CSUM_TCP;
10781
10782                 } else {
10783                         rc = XMIT_CSUM_V4;
10784                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10785                                 rc |= XMIT_CSUM_TCP;
10786                 }
10787         }
10788
10789         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10790                 rc |= XMIT_GSO_V4;
10791
10792         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10793                 rc |= XMIT_GSO_V6;
10794
10795         return rc;
10796 }
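/* Editorial note: e.g. a CHECKSUM_PARTIAL TCP/IPv4 skb with gso_type
 * SKB_GSO_TCPV4 yields
 *
 *     XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 *
 * while an skb with no checksum offload yields plain XMIT_PLAIN.
 */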
10797
10798 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10799 /* Check if the packet requires linearization (i.e. it is too fragmented).
10800    There is no need to check fragmentation if the page size is > 8K
10801    (the FW restrictions cannot be violated in that case). */
10802 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10803                              u32 xmit_type)
10804 {
10805         int to_copy = 0;
10806         int hlen = 0;
10807         int first_bd_sz = 0;
10808
10809         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10810         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10811
10812                 if (xmit_type & XMIT_GSO) {
10813                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10814                         /* Check if LSO packet needs to be copied:
10815                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10816                         int wnd_size = MAX_FETCH_BD - 3;
10817                         /* Number of windows to check */
10818                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10819                         int wnd_idx = 0;
10820                         int frag_idx = 0;
10821                         u32 wnd_sum = 0;
10822
10823                         /* Headers length */
10824                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10825                                 tcp_hdrlen(skb);
10826
10827                         /* Amount of data (w/o headers) on linear part of SKB*/
10828                         first_bd_sz = skb_headlen(skb) - hlen;
10829
10830                         wnd_sum  = first_bd_sz;
10831
10832                         /* Calculate the first sum - it's special */
10833                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10834                                 wnd_sum +=
10835                                         skb_shinfo(skb)->frags[frag_idx].size;
10836
10837                         /* If there was data on linear skb data - check it */
10838                         if (first_bd_sz > 0) {
10839                                 if (unlikely(wnd_sum < lso_mss)) {
10840                                         to_copy = 1;
10841                                         goto exit_lbl;
10842                                 }
10843
10844                                 wnd_sum -= first_bd_sz;
10845                         }
10846
10847                         /* Others are easier: run through the frag list and
10848                            check all windows */
10849                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10850                                 wnd_sum +=
10851                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10852
10853                                 if (unlikely(wnd_sum < lso_mss)) {
10854                                         to_copy = 1;
10855                                         break;
10856                                 }
10857                                 wnd_sum -=
10858                                         skb_shinfo(skb)->frags[wnd_idx].size;
10859                         }
10860                 } else {
10861                         /* in non-LSO too fragmented packet should always
10862                            be linearized */
10863                         to_copy = 1;
10864                 }
10865         }
10866
10867 exit_lbl:
10868         if (unlikely(to_copy))
10869                 DP(NETIF_MSG_TX_QUEUED,
10870                    "Linearization IS REQUIRED for %s packet. "
10871                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10872                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10873                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10874
10875         return to_copy;
10876 }
10877 #endif
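/* Editorial sketch of the window check above. Assuming for
 * illustration MAX_FETCH_BD == 13 (so wnd_size == 10): the FW can
 * fetch at most wnd_size data BDs per MSS worth of payload, so every
 * run of wnd_size consecutive fragments (counting the linear data as
 * the head of the first window) must carry at least lso_mss bytes.
 * Any window summing below lso_mss makes bnx2x_start_xmit() fall back
 * to skb_linearize().
 */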
10878
10879 /* called with netif_tx_lock
10880  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10881  * netif_wake_queue()
10882  */
10883 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10884 {
10885         struct bnx2x *bp = netdev_priv(dev);
10886         struct bnx2x_fastpath *fp, *fp_stat;
10887         struct netdev_queue *txq;
10888         struct sw_tx_bd *tx_buf;
10889         struct eth_tx_start_bd *tx_start_bd;
10890         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10891         struct eth_tx_parse_bd *pbd = NULL;
10892         u16 pkt_prod, bd_prod;
10893         int nbd, fp_index;
10894         dma_addr_t mapping;
10895         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10896         int i;
10897         u8 hlen = 0;
10898         __le16 pkt_size = 0;
10899
10900 #ifdef BNX2X_STOP_ON_ERROR
10901         if (unlikely(bp->panic))
10902                 return NETDEV_TX_BUSY;
10903 #endif
10904
10905         fp_index = skb_get_queue_mapping(skb);
10906         txq = netdev_get_tx_queue(dev, fp_index);
10907
10908         fp = &bp->fp[fp_index + bp->num_rx_queues];
10909         fp_stat = &bp->fp[fp_index];
10910
10911         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10912                 fp_stat->eth_q_stats.driver_xoff++;
10913                 netif_tx_stop_queue(txq);
10914                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10915                 return NETDEV_TX_BUSY;
10916         }
10917
10918         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10919            "  gso type %x  xmit_type %x\n",
10920            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10921            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10922
10923 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10924         /* First, check if we need to linearize the skb (due to FW
10925            restrictions). No need to check fragmentation if page size > 8K
10926            (there will be no violation to FW restrictions) */
10927         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10928                 /* Statistics of linearization */
10929                 bp->lin_cnt++;
10930                 if (skb_linearize(skb) != 0) {
10931                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10932                            "silently dropping this SKB\n");
10933                         dev_kfree_skb_any(skb);
10934                         return NETDEV_TX_OK;
10935                 }
10936         }
10937 #endif
10938
10939         /*
10940          * Please read carefully. First we use one BD which we mark as
10941          * start, then we have a parsing info BD (used for TSO or xsum),
10942          * and only then we have the rest of the TSO BDs.
10943          * (Don't forget to mark the last one as last,
10944          * and to unmap only AFTER you write to the BD ...)
10945          * And above all, all pbd sizes are in words - NOT DWORDS!
10946          */
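        /* Editorial sketch of the resulting BD chain for a TSO packet
         * with two fragments whose headers get split off:
         *
         *     [start BD][parse BD][split data BD][frag BD][frag BD]
         *
         * nbd starts as nr_frags + 2 (start BD + parse BD + frags)
         * below and is incremented once more by bnx2x_tx_split().
         */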
10947
10948         pkt_prod = fp->tx_pkt_prod++;
10949         bd_prod = TX_BD(fp->tx_bd_prod);
10950
10951         /* get a tx_buf and first BD */
10952         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10953         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10954
10955         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10956         tx_start_bd->general_data = (UNICAST_ADDRESS <<
10957                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10958         /* header nbd */
10959         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10960
10961         /* remember the first BD of the packet */
10962         tx_buf->first_bd = fp->tx_bd_prod;
10963         tx_buf->skb = skb;
10964         tx_buf->flags = 0;
10965
10966         DP(NETIF_MSG_TX_QUEUED,
10967            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10968            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10969
10970 #ifdef BCM_VLAN
10971         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10972             (bp->flags & HW_VLAN_TX_FLAG)) {
10973                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10974                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10975         } else
10976 #endif
10977                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10978
10979         /* turn on parsing and get a BD */
10980         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10981         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10982
10983         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10984
10985         if (xmit_type & XMIT_CSUM) {
10986                 hlen = (skb_network_header(skb) - skb->data) / 2;
10987
10988                 /* for now NS flag is not used in Linux */
10989                 pbd->global_data =
10990                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10991                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10992
10993                 pbd->ip_hlen = (skb_transport_header(skb) -
10994                                 skb_network_header(skb)) / 2;
10995
10996                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10997
10998                 pbd->total_hlen = cpu_to_le16(hlen);
10999                 hlen = hlen*2;
11000
11001                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11002
11003                 if (xmit_type & XMIT_CSUM_V4)
11004                         tx_start_bd->bd_flags.as_bitfield |=
11005                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11006                 else
11007                         tx_start_bd->bd_flags.as_bitfield |=
11008                                                 ETH_TX_BD_FLAGS_IPV6;
11009
11010                 if (xmit_type & XMIT_CSUM_TCP) {
11011                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11012
11013                 } else {
11014                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11015
11016                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11017
11018                         DP(NETIF_MSG_TX_QUEUED,
11019                            "hlen %d  fix %d  csum before fix %x\n",
11020                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11021
11022                         /* HW bug: fixup the CSUM */
11023                         pbd->tcp_pseudo_csum =
11024                                 bnx2x_csum_fix(skb_transport_header(skb),
11025                                                SKB_CS(skb), fix);
11026
11027                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11028                            pbd->tcp_pseudo_csum);
11029                 }
11030         }
11031
11032         mapping = pci_map_single(bp->pdev, skb->data,
11033                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11034
11035         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11036         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11037         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11038         tx_start_bd->nbd = cpu_to_le16(nbd);
11039         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11040         pkt_size = tx_start_bd->nbytes;
11041
11042         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11043            "  nbytes %d  flags %x  vlan %x\n",
11044            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11045            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11046            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11047
11048         if (xmit_type & XMIT_GSO) {
11049
11050                 DP(NETIF_MSG_TX_QUEUED,
11051                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11052                    skb->len, hlen, skb_headlen(skb),
11053                    skb_shinfo(skb)->gso_size);
11054
11055                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11056
11057                 if (unlikely(skb_headlen(skb) > hlen))
11058                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11059                                                  hlen, bd_prod, ++nbd);
11060
11061                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11062                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11063                 pbd->tcp_flags = pbd_tcp_flags(skb);
11064
11065                 if (xmit_type & XMIT_GSO_V4) {
11066                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11067                         pbd->tcp_pseudo_csum =
11068                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11069                                                           ip_hdr(skb)->daddr,
11070                                                           0, IPPROTO_TCP, 0));
11071
11072                 } else
11073                         pbd->tcp_pseudo_csum =
11074                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11075                                                         &ipv6_hdr(skb)->daddr,
11076                                                         0, IPPROTO_TCP, 0));
11077
11078                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11079         }
11080         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11081
11082         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11083                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11084
11085                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11086                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11087                 if (total_pkt_bd == NULL)
11088                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11089
11090                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11091                                        frag->size, PCI_DMA_TODEVICE);
11092
11093                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11094                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11095                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11096                 le16_add_cpu(&pkt_size, frag->size);
11097
11098                 DP(NETIF_MSG_TX_QUEUED,
11099                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11100                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11101                    le16_to_cpu(tx_data_bd->nbytes));
11102         }
11103
11104         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11105
11106         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11107
11108         /* now send a tx doorbell, counting the next-page BD
11109          * if the packet contains or ends with it
11110          */
11111         if (TX_BD_POFF(bd_prod) < nbd)
11112                 nbd++;
11113
11114         if (total_pkt_bd != NULL)
11115                 total_pkt_bd->total_pkt_bytes = pkt_size;
11116
11117         if (pbd)
11118                 DP(NETIF_MSG_TX_QUEUED,
11119                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11120                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11121                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11122                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11123                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11124
11125         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11126
11127         /*
11128          * Make sure that the BD data is updated before updating the producer
11129          * since FW might read the BD right after the producer is updated.
11130          * This is only applicable for weak-ordered memory model archs such
11131          * as IA-64. The following barrier is also mandatory since the
11132          * FW assumes packets must have BDs.
11133          */
11134         wmb();
11135
11136         fp->tx_db.data.prod += nbd;
11137         barrier();
11138         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11139
11140         mmiowb();
11141
11142         fp->tx_bd_prod += nbd;
11143
11144         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11145                 netif_tx_stop_queue(txq);
11146                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11147                    if we put Tx into XOFF state. */
11148                 smp_mb();
11149                 fp_stat->eth_q_stats.driver_xoff++;
11150                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11151                         netif_tx_wake_queue(txq);
11152         }
11153         fp_stat->tx_pkt++;
11154
11155         return NETDEV_TX_OK;
11156 }
11157
11158 /* called with rtnl_lock */
11159 static int bnx2x_open(struct net_device *dev)
11160 {
11161         struct bnx2x *bp = netdev_priv(dev);
11162
11163         netif_carrier_off(dev);
11164
11165         bnx2x_set_power_state(bp, PCI_D0);
11166
11167         return bnx2x_nic_load(bp, LOAD_OPEN);
11168 }
11169
11170 /* called with rtnl_lock */
11171 static int bnx2x_close(struct net_device *dev)
11172 {
11173         struct bnx2x *bp = netdev_priv(dev);
11174
11175         /* Unload the driver, release IRQs */
11176         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11177         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11178                 if (!CHIP_REV_IS_SLOW(bp))
11179                         bnx2x_set_power_state(bp, PCI_D3hot);
11180
11181         return 0;
11182 }
11183
11184 /* called with netif_tx_lock from dev_mcast.c */
11185 static void bnx2x_set_rx_mode(struct net_device *dev)
11186 {
11187         struct bnx2x *bp = netdev_priv(dev);
11188         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11189         int port = BP_PORT(bp);
11190
11191         if (bp->state != BNX2X_STATE_OPEN) {
11192                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11193                 return;
11194         }
11195
11196         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11197
11198         if (dev->flags & IFF_PROMISC)
11199                 rx_mode = BNX2X_RX_MODE_PROMISC;
11200
11201         else if ((dev->flags & IFF_ALLMULTI) ||
11202                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11203                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11204
11205         else { /* some multicasts */
11206                 if (CHIP_IS_E1(bp)) {
11207                         int i, old, offset;
11208                         struct dev_mc_list *mclist;
11209                         struct mac_configuration_cmd *config =
11210                                                 bnx2x_sp(bp, mcast_config);
11211
11212                         for (i = 0, mclist = dev->mc_list;
11213                              mclist && (i < dev->mc_count);
11214                              i++, mclist = mclist->next) {
11215
11216                                 config->config_table[i].
11217                                         cam_entry.msb_mac_addr =
11218                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11219                                 config->config_table[i].
11220                                         cam_entry.middle_mac_addr =
11221                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11222                                 config->config_table[i].
11223                                         cam_entry.lsb_mac_addr =
11224                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11225                                 config->config_table[i].cam_entry.flags =
11226                                                         cpu_to_le16(port);
11227                                 config->config_table[i].
11228                                         target_table_entry.flags = 0;
11229                                 config->config_table[i].target_table_entry.
11230                                         clients_bit_vector =
11231                                                 cpu_to_le32(1 << BP_L_ID(bp));
11232                                 config->config_table[i].
11233                                         target_table_entry.vlan_id = 0;
11234
11235                                 DP(NETIF_MSG_IFUP,
11236                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11237                                    config->config_table[i].
11238                                                 cam_entry.msb_mac_addr,
11239                                    config->config_table[i].
11240                                                 cam_entry.middle_mac_addr,
11241                                    config->config_table[i].
11242                                                 cam_entry.lsb_mac_addr);
11243                         }
11244                         old = config->hdr.length;
11245                         if (old > i) {
11246                                 for (; i < old; i++) {
11247                                         if (CAM_IS_INVALID(config->
11248                                                            config_table[i])) {
11249                                                 /* already invalidated */
11250                                                 break;
11251                                         }
11252                                         /* invalidate */
11253                                         CAM_INVALIDATE(config->
11254                                                        config_table[i]);
11255                                 }
11256                         }
11257
11258                         if (CHIP_REV_IS_SLOW(bp))
11259                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11260                         else
11261                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11262
11263                         config->hdr.length = i;
11264                         config->hdr.offset = offset;
11265                         config->hdr.client_id = bp->fp->cl_id;
11266                         config->hdr.reserved1 = 0;
11267
11268                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11269                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11270                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11271                                       0);
11272                 } else { /* E1H */
11273                         /* Accept one or more multicasts */
11274                         struct dev_mc_list *mclist;
11275                         u32 mc_filter[MC_HASH_SIZE];
11276                         u32 crc, bit, regidx;
11277                         int i;
11278
11279                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11280
11281                         for (i = 0, mclist = dev->mc_list;
11282                              mclist && (i < dev->mc_count);
11283                              i++, mclist = mclist->next) {
11284
11285                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11286                                    mclist->dmi_addr);
11287
11288                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11289                                 bit = (crc >> 24) & 0xff;
11290                                 regidx = bit >> 5;
11291                                 bit &= 0x1f;
11292                                 mc_filter[regidx] |= (1 << bit);
11293                         }
11294
11295                         for (i = 0; i < MC_HASH_SIZE; i++)
11296                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11297                                        mc_filter[i]);
11298                 }
11299         }
11300
11301         bp->rx_mode = rx_mode;
11302         bnx2x_set_storm_rx_mode(bp);
11303 }
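/* Editorial sketch: where one multicast MAC lands in the E1H hash
 * above, with an illustrative CRC value. If crc32c_le() over the
 * 6-byte MAC yields 0xa3xxxxxx:
 *
 *     bit    = (crc >> 24) & 0xff  ->  0xa3 == 163
 *     regidx = 163 >> 5            ->  5
 *     bit    = 163 & 0x1f          ->  3
 *
 * so bit 3 of mc_filter[5] is set. Assuming MC_HASH_SIZE == 8, the
 * eight 32-bit registers cover all 256 hash buckets.
 */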
11304
11305 /* called with rtnl_lock */
11306 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11307 {
11308         struct sockaddr *addr = p;
11309         struct bnx2x *bp = netdev_priv(dev);
11310
11311         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11312                 return -EINVAL;
11313
11314         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11315         if (netif_running(dev)) {
11316                 if (CHIP_IS_E1(bp))
11317                         bnx2x_set_mac_addr_e1(bp, 1);
11318                 else
11319                         bnx2x_set_mac_addr_e1h(bp, 1);
11320         }
11321
11322         return 0;
11323 }
11324
11325 /* called with rtnl_lock */
11326 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11327                            int devad, u16 addr)
11328 {
11329         struct bnx2x *bp = netdev_priv(netdev);
11330         u16 value;
11331         int rc;
11332         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11333
11334         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11335            prtad, devad, addr);
11336
11337         if (prtad != bp->mdio.prtad) {
11338                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11339                    prtad, bp->mdio.prtad);
11340                 return -EINVAL;
11341         }
11342
11343         /* The HW expects different devad if CL22 is used */
11344         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11345
11346         bnx2x_acquire_phy_lock(bp);
11347         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11348                              devad, addr, &value);
11349         bnx2x_release_phy_lock(bp);
11350         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11351
11352         if (!rc)
11353                 rc = value;
11354         return rc;
11355 }
11356
11357 /* called with rtnl_lock */
11358 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11359                             u16 addr, u16 value)
11360 {
11361         struct bnx2x *bp = netdev_priv(netdev);
11362         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11363         int rc;
11364
11365         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11366                            " value 0x%x\n", prtad, devad, addr, value);
11367
11368         if (prtad != bp->mdio.prtad) {
11369                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11370                    prtad, bp->mdio.prtad);
11371                 return -EINVAL;
11372         }
11373
11374         /* The HW expects different devad if CL22 is used */
11375         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11376
11377         bnx2x_acquire_phy_lock(bp);
11378         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11379                               devad, addr, value);
11380         bnx2x_release_phy_lock(bp);
11381         return rc;
11382 }
11383
11384 /* called with rtnl_lock */
11385 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11386 {
11387         struct bnx2x *bp = netdev_priv(dev);
11388         struct mii_ioctl_data *mdio = if_mii(ifr);
11389
11390         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11391            mdio->phy_id, mdio->reg_num, mdio->val_in);
11392
11393         if (!netif_running(dev))
11394                 return -EAGAIN;
11395
11396         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11397 }
11398
11399 /* called with rtnl_lock */
11400 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11401 {
11402         struct bnx2x *bp = netdev_priv(dev);
11403         int rc = 0;
11404
11405         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11406             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11407                 return -EINVAL;
11408
11409         /* This does not race with packet allocation
11410          * because the actual alloc size is
11411          * only updated as part of load
11412          */
11413         dev->mtu = new_mtu;
11414
11415         if (netif_running(dev)) {
11416                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11417                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11418         }
11419
11420         return rc;
11421 }
11422
11423 static void bnx2x_tx_timeout(struct net_device *dev)
11424 {
11425         struct bnx2x *bp = netdev_priv(dev);
11426
11427 #ifdef BNX2X_STOP_ON_ERROR
11428         if (!bp->panic)
11429                 bnx2x_panic();
11430 #endif
11431         /* This allows the netif to be shut down gracefully before resetting */
11432         schedule_work(&bp->reset_task);
11433 }
11434
11435 #ifdef BCM_VLAN
11436 /* called with rtnl_lock */
11437 static void bnx2x_vlan_rx_register(struct net_device *dev,
11438                                    struct vlan_group *vlgrp)
11439 {
11440         struct bnx2x *bp = netdev_priv(dev);
11441
11442         bp->vlgrp = vlgrp;
11443
11444         /* Set flags according to the required capabilities */
11445         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11446
11447         if (dev->features & NETIF_F_HW_VLAN_TX)
11448                 bp->flags |= HW_VLAN_TX_FLAG;
11449
11450         if (dev->features & NETIF_F_HW_VLAN_RX)
11451                 bp->flags |= HW_VLAN_RX_FLAG;
11452
11453         if (netif_running(dev))
11454                 bnx2x_set_client_config(bp);
11455 }
11456
11457 #endif
11458
11459 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11460 static void poll_bnx2x(struct net_device *dev)
11461 {
11462         struct bnx2x *bp = netdev_priv(dev);
11463
11464         disable_irq(bp->pdev->irq);
11465         bnx2x_interrupt(bp->pdev->irq, dev);
11466         enable_irq(bp->pdev->irq);
11467 }
11468 #endif
11469
11470 static const struct net_device_ops bnx2x_netdev_ops = {
11471         .ndo_open               = bnx2x_open,
11472         .ndo_stop               = bnx2x_close,
11473         .ndo_start_xmit         = bnx2x_start_xmit,
11474         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11475         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11476         .ndo_validate_addr      = eth_validate_addr,
11477         .ndo_do_ioctl           = bnx2x_ioctl,
11478         .ndo_change_mtu         = bnx2x_change_mtu,
11479         .ndo_tx_timeout         = bnx2x_tx_timeout,
11480 #ifdef BCM_VLAN
11481         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11482 #endif
11483 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11484         .ndo_poll_controller    = poll_bnx2x,
11485 #endif
11486 };
11487
11488 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11489                                     struct net_device *dev)
11490 {
11491         struct bnx2x *bp;
11492         int rc;
11493
11494         SET_NETDEV_DEV(dev, &pdev->dev);
11495         bp = netdev_priv(dev);
11496
11497         bp->dev = dev;
11498         bp->pdev = pdev;
11499         bp->flags = 0;
11500         bp->func = PCI_FUNC(pdev->devfn);
11501
11502         rc = pci_enable_device(pdev);
11503         if (rc) {
11504                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11505                 goto err_out;
11506         }
11507
11508         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11509                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11510                        " aborting\n");
11511                 rc = -ENODEV;
11512                 goto err_out_disable;
11513         }
11514
11515         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11516                 printk(KERN_ERR PFX "Cannot find second PCI device"
11517                        " base address, aborting\n");
11518                 rc = -ENODEV;
11519                 goto err_out_disable;
11520         }
11521
11522         if (atomic_read(&pdev->enable_cnt) == 1) {
11523                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11524                 if (rc) {
11525                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11526                                " aborting\n");
11527                         goto err_out_disable;
11528                 }
11529
11530                 pci_set_master(pdev);
11531                 pci_save_state(pdev);
11532         }
11533
11534         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11535         if (bp->pm_cap == 0) {
11536                 printk(KERN_ERR PFX "Cannot find power management"
11537                        " capability, aborting\n");
11538                 rc = -EIO;
11539                 goto err_out_release;
11540         }
11541
11542         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11543         if (bp->pcie_cap == 0) {
11544                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11545                        " aborting\n");
11546                 rc = -EIO;
11547                 goto err_out_release;
11548         }
11549
11550         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11551                 bp->flags |= USING_DAC_FLAG;
11552                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11553                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11554                                " failed, aborting\n");
11555                         rc = -EIO;
11556                         goto err_out_release;
11557                 }
11558
11559         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11560                 printk(KERN_ERR PFX "System does not support DMA,"
11561                        " aborting\n");
11562                 rc = -EIO;
11563                 goto err_out_release;
11564         }
11565
11566         dev->mem_start = pci_resource_start(pdev, 0);
11567         dev->base_addr = dev->mem_start;
11568         dev->mem_end = pci_resource_end(pdev, 0);
11569
11570         dev->irq = pdev->irq;
11571
11572         bp->regview = pci_ioremap_bar(pdev, 0);
11573         if (!bp->regview) {
11574                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11575                 rc = -ENOMEM;
11576                 goto err_out_release;
11577         }
11578
11579         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11580                                         min_t(u64, BNX2X_DB_SIZE,
11581                                               pci_resource_len(pdev, 2)));
11582         if (!bp->doorbells) {
11583                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11584                 rc = -ENOMEM;
11585                 goto err_out_unmap;
11586         }
11587
11588         bnx2x_set_power_state(bp, PCI_D0);
11589
11590         /* clean indirect addresses */
11591         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11592                                PCICFG_VENDOR_ID_OFFSET);
11593         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11594         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11595         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11596         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11597
11598         dev->watchdog_timeo = TX_TIMEOUT;
11599
11600         dev->netdev_ops = &bnx2x_netdev_ops;
11601         dev->ethtool_ops = &bnx2x_ethtool_ops;
11602         dev->features |= NETIF_F_SG;
11603         dev->features |= NETIF_F_HW_CSUM;
11604         if (bp->flags & USING_DAC_FLAG)
11605                 dev->features |= NETIF_F_HIGHDMA;
11606         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11607         dev->features |= NETIF_F_TSO6;
11608 #ifdef BCM_VLAN
11609         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11610         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11611
11612         dev->vlan_features |= NETIF_F_SG;
11613         dev->vlan_features |= NETIF_F_HW_CSUM;
11614         if (bp->flags & USING_DAC_FLAG)
11615                 dev->vlan_features |= NETIF_F_HIGHDMA;
11616         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11617         dev->vlan_features |= NETIF_F_TSO6;
11618 #endif
11619
11620         /* get_port_hwinfo() will set prtad and mmds properly */
11621         bp->mdio.prtad = MDIO_PRTAD_NONE;
11622         bp->mdio.mmds = 0;
11623         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11624         bp->mdio.dev = dev;
11625         bp->mdio.mdio_read = bnx2x_mdio_read;
11626         bp->mdio.mdio_write = bnx2x_mdio_write;
11627
11628         return 0;
11629
11630 err_out_unmap:
11631         if (bp->regview) {
11632                 iounmap(bp->regview);
11633                 bp->regview = NULL;
11634         }
11635         if (bp->doorbells) {
11636                 iounmap(bp->doorbells);
11637                 bp->doorbells = NULL;
11638         }
11639
11640 err_out_release:
11641         if (atomic_read(&pdev->enable_cnt) == 1)
11642                 pci_release_regions(pdev);
11643
11644 err_out_disable:
11645         pci_disable_device(pdev);
11646         pci_set_drvdata(pdev, NULL);
11647
11648 err_out:
11649         return rc;
11650 }
11651
11652 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11653 {
11654         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11655
11656         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11657         return val;
11658 }
11659
11660 /* returns 1 for 2.5 GHz, 2 for 5 GHz */
11661 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11662 {
11663         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11664
11665         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11666         return val;
11667 }
11668 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11669 {
11670         struct bnx2x_fw_file_hdr *fw_hdr;
11671         struct bnx2x_fw_file_section *sections;
11672         u16 *ops_offsets;
11673         u32 offset, len, num_ops;
11674         int i;
11675         const struct firmware *firmware = bp->firmware;
11676         const u8 *fw_ver;
11677
11678         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11679                 return -EINVAL;
11680
11681         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11682         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11683
11684         /* Make sure none of the offsets and sizes make us read beyond
11685          * the end of the firmware data */
11686         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11687                 offset = be32_to_cpu(sections[i].offset);
11688                 len = be32_to_cpu(sections[i].len);
11689                 if (offset + len > firmware->size) {
11690                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11691                         return -EINVAL;
11692                 }
11693         }
11694
11695         /* Likewise for the init_ops offsets */
11696         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11697         ops_offsets = (u16 *)(firmware->data + offset);
11698         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11699
11700         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11701                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11702                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11703                         return -EINVAL;
11704                 }
11705         }
11706
11707         /* Check FW version */
11708         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11709         fw_ver = firmware->data + offset;
11710         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11711             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11712             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11713             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11714                 printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
11715                                     " Should be %d.%d.%d.%d\n",
11716                        fw_ver[0], fw_ver[1], fw_ver[2],
11717                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11718                        BCM_5710_FW_MINOR_VERSION,
11719                        BCM_5710_FW_REVISION_VERSION,
11720                        BCM_5710_FW_ENGINEERING_VERSION);
11721                 return -EINVAL;
11722         }
11723
11724         return 0;
11725 }
11726
11727 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11728 {
11729         u32 i;
11730         const __be32 *source = (const __be32 *)_source;
11731         u32 *target = (u32 *)_target;
11732
11733         for (i = 0; i < n/4; i++)
11734                 target[i] = be32_to_cpu(source[i]);
11735 }
11736
11737 /*
11738    Ops array is stored in the following format:
11739    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11740  */
11741 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11742 {
11743         u32 i, j, tmp;
11744         const __be32 *source = (const __be32 *)_source;
11745         struct raw_op *target = (struct raw_op *)_target;
11746
11747         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11748                 tmp = be32_to_cpu(source[j]);
11749                 target[i].op = (tmp >> 24) & 0xff;
11750                 target[i].offset = tmp & 0xffffff;
11751                 target[i].raw_data = be32_to_cpu(source[j+1]);
11752         }
11753 }
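/* Editorial example of the decoding above: the 8-byte big-endian
 * record 05 01 02 03 aa bb cc dd unpacks to
 *
 *     op = 0x05, offset = 0x010203, raw_data = 0xaabbccdd
 */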
11754 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11755 {
11756         u32 i;
11757         u16 *target = (u16 *)_target;
11758         const __be16 *source = (const __be16 *)_source;
11759
11760         for (i = 0; i < n/2; i++)
11761                 target[i] = be16_to_cpu(source[i]);
11762 }
11763
11764 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11765         do {   \
11766                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11767                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11768                 if (!bp->arr) { \
11769                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11770                         goto lbl; \
11771                 } \
11772                 func(bp->firmware->data + \
11773                         be32_to_cpu(fw_hdr->arr.offset), \
11774                         (u8*)bp->arr, len); \
11775         } while (0)
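/* Editorial note: BNX2X_ALLOC_AND_SET(init_data, lbl, be32_to_cpu_n)
 * expands to roughly:
 *
 *     u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *     bp->init_data = kmalloc(len, GFP_KERNEL);
 *     if (!bp->init_data)
 *             goto lbl;       // after printing the error
 *     be32_to_cpu_n(bp->firmware->data +
 *                   be32_to_cpu(fw_hdr->init_data.offset),
 *                   (u8 *)bp->init_data, len);
 */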
11776
11777
11778 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11779 {
11780         char fw_file_name[40] = {0};
11781         int rc, offset;
11782         struct bnx2x_fw_file_hdr *fw_hdr;
11783
11784         /* Create a FW file name */
11785         if (CHIP_IS_E1(bp))
11786                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11787         else
11788                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11789
11790         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11791                 BCM_5710_FW_MAJOR_VERSION,
11792                 BCM_5710_FW_MINOR_VERSION,
11793                 BCM_5710_FW_REVISION_VERSION,
11794                 BCM_5710_FW_ENGINEERING_VERSION);
11795
11796         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11797
11798         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11799         if (rc) {
11800                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11801                 goto request_firmware_exit;
11802         }
11803
11804         rc = bnx2x_check_firmware(bp);
11805         if (rc) {
11806                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11807                 goto request_firmware_exit;
11808         }
11809
11810         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11811
11812         /* Initialize the pointers to the init arrays */
11813         /* Blob */
11814         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11815
11816         /* Opcodes */
11817         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11818
11819         /* Offsets */
11820         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11821
11822         /* STORMs firmware */
11823         bp->tsem_int_table_data = bp->firmware->data +
11824                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11825         bp->tsem_pram_data      = bp->firmware->data +
11826                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11827         bp->usem_int_table_data = bp->firmware->data +
11828                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11829         bp->usem_pram_data      = bp->firmware->data +
11830                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11831         bp->xsem_int_table_data = bp->firmware->data +
11832                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11833         bp->xsem_pram_data      = bp->firmware->data +
11834                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11835         bp->csem_int_table_data = bp->firmware->data +
11836                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11837         bp->csem_pram_data      = bp->firmware->data +
11838                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11839
11840         return 0;
11841 init_offsets_alloc_err:
11842         kfree(bp->init_ops);
11843 init_ops_alloc_err:
11844         kfree(bp->init_data);
11845 request_firmware_exit:
11846         release_firmware(bp->firmware);
11847
11848         return rc;
11849 }
11850
11851
11852
11853 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11854                                     const struct pci_device_id *ent)
11855 {
11856         static int version_printed;
11857         struct net_device *dev = NULL;
11858         struct bnx2x *bp;
11859         int rc;
11860
11861         if (version_printed++ == 0)
11862                 printk(KERN_INFO "%s", version);
11863
11864         /* dev zeroed in init_etherdev */
11865         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11866         if (!dev) {
11867                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11868                 return -ENOMEM;
11869         }
11870
11871         bp = netdev_priv(dev);
11872         bp->msglevel = debug;
11873
11874         rc = bnx2x_init_dev(pdev, dev);
11875         if (rc < 0) {
11876                 free_netdev(dev);
11877                 return rc;
11878         }
11879
11880         pci_set_drvdata(pdev, dev);
11881
11882         rc = bnx2x_init_bp(bp);
11883         if (rc)
11884                 goto init_one_exit;
11885
11886         /* Set init arrays */
11887         rc = bnx2x_init_firmware(bp, &pdev->dev);
11888         if (rc) {
11889                 printk(KERN_ERR PFX "Error loading firmware\n");
11890                 goto init_one_exit;
11891         }
11892
11893         rc = register_netdev(dev);
11894         if (rc) {
11895                 dev_err(&pdev->dev, "Cannot register net device\n");
11896                 goto init_one_exit;
11897         }
11898
11899         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11900                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11901                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11902                bnx2x_get_pcie_width(bp),
11903                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11904                dev->base_addr, bp->pdev->irq);
11905         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11906
11907         return 0;
11908
11909 init_one_exit:
11910         if (bp->regview)
11911                 iounmap(bp->regview);
11912
11913         if (bp->doorbells)
11914                 iounmap(bp->doorbells);
11915
11916         free_netdev(dev);
11917
11918         if (atomic_read(&pdev->enable_cnt) == 1)
11919                 pci_release_regions(pdev);
11920
11921         pci_disable_device(pdev);
11922         pci_set_drvdata(pdev, NULL);
11923
11924         return rc;
11925 }
11926
11927 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11928 {
11929         struct net_device *dev = pci_get_drvdata(pdev);
11930         struct bnx2x *bp;
11931
11932         if (!dev) {
11933                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11934                 return;
11935         }
11936         bp = netdev_priv(dev);
11937
11938         unregister_netdev(dev);
11939
11940         kfree(bp->init_ops_offsets);
11941         kfree(bp->init_ops);
11942         kfree(bp->init_data);
11943         release_firmware(bp->firmware);
11944
11945         if (bp->regview)
11946                 iounmap(bp->regview);
11947
11948         if (bp->doorbells)
11949                 iounmap(bp->doorbells);
11950
11951         free_netdev(dev);
11952
11953         if (atomic_read(&pdev->enable_cnt) == 1)
11954                 pci_release_regions(pdev);
11955
11956         pci_disable_device(pdev);
11957         pci_set_drvdata(pdev, NULL);
11958 }
11959
11960 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11961 {
11962         struct net_device *dev = pci_get_drvdata(pdev);
11963         struct bnx2x *bp;
11964
11965         if (!dev) {
11966                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11967                 return -ENODEV;
11968         }
11969         bp = netdev_priv(dev);
11970
11971         rtnl_lock();
11972
11973         pci_save_state(pdev);
11974
11975         if (!netif_running(dev)) {
11976                 rtnl_unlock();
11977                 return 0;
11978         }
11979
11980         netif_device_detach(dev);
11981
11982         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11983
11984         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11985
11986         rtnl_unlock();
11987
11988         return 0;
11989 }
11990
11991 static int bnx2x_resume(struct pci_dev *pdev)
11992 {
11993         struct net_device *dev = pci_get_drvdata(pdev);
11994         struct bnx2x *bp;
11995         int rc;
11996
11997         if (!dev) {
11998                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11999                 return -ENODEV;
12000         }
12001         bp = netdev_priv(dev);
12002
12003         rtnl_lock();
12004
12005         pci_restore_state(pdev);
12006
12007         if (!netif_running(dev)) {
12008                 rtnl_unlock();
12009                 return 0;
12010         }
12011
12012         bnx2x_set_power_state(bp, PCI_D0);
12013         netif_device_attach(dev);
12014
12015         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12016
12017         rtnl_unlock();
12018
12019         return rc;
12020 }
12021
12022 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12023 {
12024         int i;
12025
12026         bp->state = BNX2X_STATE_ERROR;
12027
12028         bp->rx_mode = BNX2X_RX_MODE_NONE;
12029
12030         bnx2x_netif_stop(bp, 0);
12031
12032         del_timer_sync(&bp->timer);
12033         bp->stats_state = STATS_STATE_DISABLED;
12034         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12035
12036         /* Release IRQs */
12037         bnx2x_free_irq(bp);
12038
12039         if (CHIP_IS_E1(bp)) {
12040                 struct mac_configuration_cmd *config =
12041                                                 bnx2x_sp(bp, mcast_config);
12042
12043                 for (i = 0; i < config->hdr.length; i++)
12044                         CAM_INVALIDATE(config->config_table[i]);
12045         }
12046
12047         /* Free SKBs, SGEs, TPA pool and driver internals */
12048         bnx2x_free_skbs(bp);
12049         for_each_rx_queue(bp, i)
12050                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12051         for_each_rx_queue(bp, i)
12052                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12053         bnx2x_free_mem(bp);
12054
12055         bp->state = BNX2X_STATE_CLOSED;
12056
12057         netif_carrier_off(bp->dev);
12058
12059         return 0;
12060 }
12061
12062 static void bnx2x_eeh_recover(struct bnx2x *bp)
12063 {
12064         u32 val;
12065
12066         mutex_init(&bp->port.phy_mutex);
12067
12068         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12069         bp->link_params.shmem_base = bp->common.shmem_base;
12070         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12071
12072         if (!bp->common.shmem_base ||
12073             (bp->common.shmem_base < 0xA0000) ||
12074             (bp->common.shmem_base >= 0xC0000)) {
12075                 BNX2X_DEV_INFO("MCP not active\n");
12076                 bp->flags |= NO_MCP_FLAG;
12077                 return;
12078         }
12079
12080         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12081         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12082                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12083                 BNX2X_ERR("BAD MCP validity signature\n");
12084
12085         if (!BP_NOMCP(bp)) {
12086                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12087                               & DRV_MSG_SEQ_NUMBER_MASK);
12088                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12089         }
12090 }
12091
12092 /**
12093  * bnx2x_io_error_detected - called when a PCI error is detected
12094  * @pdev: Pointer to PCI device
12095  * @state: The current PCI connection state
12096  *
12097  * This function is called after a PCI bus error affecting
12098  * this device has been detected.
12099  */
12100 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12101                                                 pci_channel_state_t state)
12102 {
12103         struct net_device *dev = pci_get_drvdata(pdev);
12104         struct bnx2x *bp = netdev_priv(dev);
12105
12106         rtnl_lock();
12107
12108         netif_device_detach(dev);
12109
12110         if (state == pci_channel_io_perm_failure) {
12111                 rtnl_unlock();
12112                 return PCI_ERS_RESULT_DISCONNECT;
12113         }
12114
12115         if (netif_running(dev))
12116                 bnx2x_eeh_nic_unload(bp);
12117
12118         pci_disable_device(pdev);
12119
12120         rtnl_unlock();
12121
12122         /* Request a slot reset */
12123         return PCI_ERS_RESULT_NEED_RESET;
12124 }
12125
12126 /**
12127  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12128  * @pdev: Pointer to PCI device
12129  *
12130  * Restart the card from scratch, as if from a cold boot.
12131  */
12132 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12133 {
12134         struct net_device *dev = pci_get_drvdata(pdev);
12135         struct bnx2x *bp = netdev_priv(dev);
12136
12137         rtnl_lock();
12138
12139         if (pci_enable_device(pdev)) {
12140                 dev_err(&pdev->dev,
12141                         "Cannot re-enable PCI device after reset\n");
12142                 rtnl_unlock();
12143                 return PCI_ERS_RESULT_DISCONNECT;
12144         }
12145
12146         pci_set_master(pdev);
12147         pci_restore_state(pdev);
12148
12149         if (netif_running(dev))
12150                 bnx2x_set_power_state(bp, PCI_D0);
12151
12152         rtnl_unlock();
12153
12154         return PCI_ERS_RESULT_RECOVERED;
12155 }
12156
12157 /**
12158  * bnx2x_io_resume - called when traffic can start flowing again
12159  * @pdev: Pointer to PCI device
12160  *
12161  * This callback is called when the error recovery driver tells us that
12162  * it's OK to resume normal operation.
12163  */
12164 static void bnx2x_io_resume(struct pci_dev *pdev)
12165 {
12166         struct net_device *dev = pci_get_drvdata(pdev);
12167         struct bnx2x *bp = netdev_priv(dev);
12168
12169         rtnl_lock();
12170
12171         bnx2x_eeh_recover(bp);
12172
12173         if (netif_running(dev))
12174                 bnx2x_nic_load(bp, LOAD_NORMAL);
12175
12176         netif_device_attach(dev);
12177
12178         rtnl_unlock();
12179 }
12180
12181 static struct pci_error_handlers bnx2x_err_handler = {
12182         .error_detected = bnx2x_io_error_detected,
12183         .slot_reset     = bnx2x_io_slot_reset,
12184         .resume         = bnx2x_io_resume,
12185 };
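/*
 * Rough recovery flow, as driven by the PCI error-recovery core:
 * .error_detected detaches the netdev, unloads the NIC and requests a
 * slot reset; once the link has been reset, .slot_reset re-enables
 * the device and restores its PCI state; finally .resume re-reads the
 * shared memory state and, if the interface was up, reloads the NIC
 * before reattaching the netdev.
 */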
12186
12187 static struct pci_driver bnx2x_pci_driver = {
12188         .name        = DRV_MODULE_NAME,
12189         .id_table    = bnx2x_pci_tbl,
12190         .probe       = bnx2x_init_one,
12191         .remove      = __devexit_p(bnx2x_remove_one),
12192         .suspend     = bnx2x_suspend,
12193         .resume      = bnx2x_resume,
12194         .err_handler = &bnx2x_err_handler,
12195 };
12196
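/**
 * bnx2x_init - module entry point
 *
 * The slowpath workqueue is created before the PCI driver is
 * registered so that it already exists when the first device is
 * probed; if registration fails, the workqueue is destroyed again.
 */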
12197 static int __init bnx2x_init(void)
12198 {
12199         int ret;
12200
12201         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12202         if (bnx2x_wq == NULL) {
12203                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12204                 return -ENOMEM;
12205         }
12206
12207         ret = pci_register_driver(&bnx2x_pci_driver);
12208         if (ret) {
12209                 printk(KERN_ERR PFX "Cannot register driver\n");
12210                 destroy_workqueue(bnx2x_wq);
12211         }
12212         return ret;
12213 }
12214
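/**
 * bnx2x_cleanup - module exit point
 *
 * Tears down in the reverse order of bnx2x_init(): the PCI driver is
 * unregistered first, so no device can queue further work, and only
 * then is the workqueue destroyed.
 */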
12215 static void __exit bnx2x_cleanup(void)
12216 {
12217         pci_unregister_driver(&bnx2x_pci_driver);
12218
12219         destroy_workqueue(bnx2x_wq);
12220 }
12221
12222 module_init(bnx2x_init);
12223 module_exit(bnx2x_cleanup);
12224
12225